/*
 * Copyright (c) 2006 - 2009 Mellanox Technology Inc.  All rights reserved.
 * Copyright (C) 2008 - 2011 Bart Van Assche <bvanassche@acm.org>.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/kthread.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/inet.h>
#include <rdma/ib_cache.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include "ib_srpt.h"

/* Name of this kernel module. */
#define DRV_NAME		"ib_srpt"
#define DRV_VERSION		"2.0.0"
#define DRV_RELDATE		"2011-02-14"

#define SRPT_ID_STRING	"Linux SRP target"

#undef pr_fmt
#define pr_fmt(fmt) DRV_NAME " " fmt

MODULE_AUTHOR("Vu Pham and Bart Van Assche");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

/*
 * Global Variables
 */

static u64 srpt_service_guid;
static DEFINE_SPINLOCK(srpt_dev_lock);	/* Protects srpt_dev_list. */
static LIST_HEAD(srpt_dev_list);	/* List of srpt_device structures. */

static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
module_param(srp_max_req_size, int, 0444);
MODULE_PARM_DESC(srp_max_req_size,
		 "Maximum size of SRP request messages in bytes.");

static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
module_param(srpt_srq_size, int, 0444);
MODULE_PARM_DESC(srpt_srq_size,
		 "Shared receive queue (SRQ) size.");

static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
}
module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
		  0444);
MODULE_PARM_DESC(srpt_service_guid,
		 "Using this value for ioc_guid, id_ext, and cm_listen_id"
		 " instead of using the node_guid of the first HCA.");

static struct ib_client srpt_client;
/* Protects both rdma_cm_port and rdma_cm_id. */
static DEFINE_MUTEX(rdma_cm_mutex);
/* Port number RDMA/CM will bind to. */
static u16 rdma_cm_port;
static struct rdma_cm_id *rdma_cm_id;
static void srpt_release_cmd(struct se_cmd *se_cmd);
static void srpt_free_ch(struct kref *kref);
static int srpt_queue_status(struct se_cmd *cmd);
static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void srpt_process_wait_list(struct srpt_rdma_ch *ch);

/*
 * The only allowed channel state changes are those that change the channel
 * state into a state with a higher numerical value. Hence the new > prev test.
 */
static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
{
	unsigned long flags;
	enum rdma_ch_state prev;
	bool changed = false;

	spin_lock_irqsave(&ch->spinlock, flags);
	prev = ch->state;
	if (new > prev) {
		ch->state = new;
		changed = true;
	}
	spin_unlock_irqrestore(&ch->spinlock, flags);

	return changed;
}

/**
 * srpt_event_handler - asynchronous IB event callback function
 * @handler: IB event handler registered by ib_register_event_handler().
 * @event: Description of the event that occurred.
 *
 * Callback function called by the InfiniBand core when an asynchronous IB
 * event occurs. This callback may occur in interrupt context. See also
 * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
 * Architecture Specification.
 */
static void srpt_event_handler(struct ib_event_handler *handler,
			       struct ib_event *event)
{
	struct srpt_device *sdev;
	struct srpt_port *sport;
	u8 port_num;

	sdev = ib_get_client_data(event->device, &srpt_client);
	if (!sdev || sdev->device != event->device)
		return;

	pr_debug("ASYNC event= %d on device= %s\n", event->event,
		 sdev->device->name);

	switch (event->event) {
	case IB_EVENT_PORT_ERR:
		port_num = event->element.port_num - 1;
		if (port_num < sdev->device->phys_port_cnt) {
			sport = &sdev->port[port_num];
			sport->lid = 0;
			sport->sm_lid = 0;
		} else {
			WARN(true, "event %d: port_num %d out of range 1..%d\n",
			     event->event, port_num + 1,
			     sdev->device->phys_port_cnt);
		}
		break;
	case IB_EVENT_PORT_ACTIVE:
	case IB_EVENT_LID_CHANGE:
	case IB_EVENT_PKEY_CHANGE:
	case IB_EVENT_SM_CHANGE:
	case IB_EVENT_CLIENT_REREGISTER:
	case IB_EVENT_GID_CHANGE:
		/* Refresh port data asynchronously. */
		port_num = event->element.port_num - 1;
		if (port_num < sdev->device->phys_port_cnt) {
			sport = &sdev->port[port_num];
			if (!sport->lid && !sport->sm_lid)
				schedule_work(&sport->work);
		} else {
			WARN(true, "event %d: port_num %d out of range 1..%d\n",
			     event->event, port_num + 1,
			     sdev->device->phys_port_cnt);
		}
		break;
	default:
		pr_err("received unrecognized IB event %d\n", event->event);
		break;
	}
}

/**
 * srpt_srq_event - SRQ event callback function
 * @event: Description of the event that occurred.
 * @ctx: Context pointer specified at SRQ creation time.
 */
static void srpt_srq_event(struct ib_event *event, void *ctx)
{
	pr_debug("SRQ event %d\n", event->event);
}

static const char *get_ch_state_name(enum rdma_ch_state s)
{
	switch (s) {
	case CH_CONNECTING:
		return "connecting";
	case CH_LIVE:
		return "live";
	case CH_DISCONNECTING:
		return "disconnecting";
	case CH_DRAINING:
		return "draining";
	case CH_DISCONNECTED:
		return "disconnected";
	}
	return "???";
}

/**
 * srpt_qp_event - QP event callback function
 * @event: Description of the event that occurred.
 * @ch: SRPT RDMA channel.
 */
static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
{
	pr_debug("QP event %d on ch=%p sess_name=%s state=%d\n",
		 event->event, ch, ch->sess_name, ch->state);

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		if (ch->using_rdma_cm)
			rdma_notify(ch->rdma_cm.cm_id, event->event);
		else
			ib_cm_notify(ch->ib_cm.cm_id, event->event);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		pr_debug("%s-%d, state %s: received Last WQE event.\n",
			 ch->sess_name, ch->qp->qp_num,
			 get_ch_state_name(ch->state));
		break;
	default:
		pr_err("received unrecognized IB QP event %d\n", event->event);
		break;
	}
}

/**
 * srpt_set_ioc - initialize an IOUnitInfo structure
 * @c_list: controller list.
 * @slot: one-based slot number.
 * @value: four-bit value.
 *
 * Copies the lowest four bits of @value into element @slot of the array of
 * four-bit elements called @c_list (controller list). The index @slot is
 * one-based.
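 *
 * For example, slot 1 occupies the upper nibble of c_list[0] and slot 2 the
 * lower nibble of c_list[0].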
 */
static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
{
	u16 id;
	u8 tmp;

	id = (slot - 1) / 2;
	if (slot & 0x1) {
		tmp = c_list[id] & 0xf;
		c_list[id] = (value << 4) | tmp;
	} else {
		tmp = c_list[id] & 0xf0;
		c_list[id] = (value & 0xf) | tmp;
	}
}

/**
 * srpt_get_class_port_info - copy ClassPortInfo to a management datagram
 * @mad: Datagram that will be sent as response to DM_ATTR_CLASS_PORT_INFO.
 *
 * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
 * Specification.
 */
static void srpt_get_class_port_info(struct ib_dm_mad *mad)
{
	struct ib_class_port_info *cif;

	cif = (struct ib_class_port_info *)mad->data;
	memset(cif, 0, sizeof(*cif));
	cif->base_version = 1;
	cif->class_version = 1;

	ib_set_cpi_resp_time(cif, 20);
	mad->mad_hdr.status = 0;
}

/**
 * srpt_get_iou - write IOUnitInfo to a management datagram
 * @mad: Datagram that will be sent as response to DM_ATTR_IOU_INFO.
 *
 * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
 * Specification. See also section B.7, table B.6 in the SRP r16a document.
 */
static void srpt_get_iou(struct ib_dm_mad *mad)
{
	struct ib_dm_iou_info *ioui;
	u8 slot;
	int i;

	ioui = (struct ib_dm_iou_info *)mad->data;
	ioui->change_id = cpu_to_be16(1);
	ioui->max_controllers = 16;

	/* set present for slot 1 and empty for the rest */
	srpt_set_ioc(ioui->controller_list, 1, 1);
	for (i = 1, slot = 2; i < 16; i++, slot++)
		srpt_set_ioc(ioui->controller_list, slot, 0);

	mad->mad_hdr.status = 0;
}

/**
 * srpt_get_ioc - write IOControllerProfile to a management datagram
 * @sport: HCA port through which the MAD has been received.
 * @slot: Slot number specified in DM_ATTR_IOC_PROFILE query.
 * @mad: Datagram that will be sent as response to DM_ATTR_IOC_PROFILE.
 *
 * See also section 16.3.3.4 IOControllerProfile in the InfiniBand
 * Architecture Specification. See also section B.7, table B.7 in the SRP
 * r16a document.
 */
static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
			 struct ib_dm_mad *mad)
{
	struct srpt_device *sdev = sport->sdev;
	struct ib_dm_ioc_profile *iocp;
	int send_queue_depth;

	iocp = (struct ib_dm_ioc_profile *)mad->data;

	if (!slot || slot > 16) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
		return;
	}

	if (slot > 2) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_NO_IOC);
		return;
	}

	if (sdev->use_srq)
		send_queue_depth = sdev->srq_size;
	else
		send_queue_depth = min(MAX_SRPT_RQ_SIZE,
				       sdev->device->attrs.max_qp_wr);

	memset(iocp, 0, sizeof(*iocp));
	strcpy(iocp->id_string, SRPT_ID_STRING);
	iocp->guid = cpu_to_be64(srpt_service_guid);
	iocp->vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
	iocp->device_id = cpu_to_be32(sdev->device->attrs.vendor_part_id);
	iocp->device_version = cpu_to_be16(sdev->device->attrs.hw_ver);
	iocp->subsys_vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
	iocp->subsys_device_id = 0x0;
	iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
	iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
	iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
	iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
	iocp->send_queue_depth = cpu_to_be16(send_queue_depth);
	iocp->rdma_read_depth = 4;
	iocp->send_size = cpu_to_be32(srp_max_req_size);
	iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
					  1U << 24));
	iocp->num_svc_entries = 1;
	iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
		SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;

	mad->mad_hdr.status = 0;
}

/**
 * srpt_get_svc_entries - write ServiceEntries to a management datagram
 * @ioc_guid: I/O controller GUID to use in reply.
 * @slot: I/O controller number.
 * @hi: End of the range of service entries to be specified in the reply.
 * @lo: Start of the range of service entries to be specified in the reply.
 * @mad: Datagram that will be sent as response to DM_ATTR_SVC_ENTRIES.
 *
 * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
 * Specification. See also section B.7, table B.8 in the SRP r16a document.
 */
static void srpt_get_svc_entries(u64 ioc_guid,
				 u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
{
	struct ib_dm_svc_entries *svc_entries;

	WARN_ON(!ioc_guid);

	if (!slot || slot > 16) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
		return;
	}

	if (slot > 2 || lo > hi || hi > 1) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_NO_IOC);
		return;
	}

	svc_entries = (struct ib_dm_svc_entries *)mad->data;
	memset(svc_entries, 0, sizeof(*svc_entries));
	svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
	snprintf(svc_entries->service_entries[0].name,
		 sizeof(svc_entries->service_entries[0].name),
		 "%s%016llx",
		 SRP_SERVICE_NAME_PREFIX,
		 ioc_guid);

	mad->mad_hdr.status = 0;
}

/**
 * srpt_mgmt_method_get - process a received management datagram
 * @sp:      HCA port through which the MAD has been received.
 * @rq_mad:  received MAD.
 * @rsp_mad: response MAD.
 */
static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
				 struct ib_dm_mad *rsp_mad)
{
	u16 attr_id;
	u32 slot;
	u8 hi, lo;

	attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
	switch (attr_id) {
	case DM_ATTR_CLASS_PORT_INFO:
		srpt_get_class_port_info(rsp_mad);
		break;
	case DM_ATTR_IOU_INFO:
		srpt_get_iou(rsp_mad);
		break;
	case DM_ATTR_IOC_PROFILE:
		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
		srpt_get_ioc(sp, slot, rsp_mad);
		break;
	case DM_ATTR_SVC_ENTRIES:
		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
		hi = (u8) ((slot >> 8) & 0xff);
		lo = (u8) (slot & 0xff);
		slot = (u16) ((slot >> 16) & 0xffff);
		srpt_get_svc_entries(srpt_service_guid,
				     slot, hi, lo, rsp_mad);
		break;
	default:
		rsp_mad->mad_hdr.status =
		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
		break;
	}
}

/**
 * srpt_mad_send_handler - MAD send completion callback
 * @mad_agent: Return value of ib_register_mad_agent().
 * @mad_wc: Work completion reporting that the MAD has been sent.
 */
static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
				  struct ib_mad_send_wc *mad_wc)
{
	rdma_destroy_ah(mad_wc->send_buf->ah);
	ib_free_send_mad(mad_wc->send_buf);
}

/**
 * srpt_mad_recv_handler - MAD reception callback function
 * @mad_agent: Return value of ib_register_mad_agent().
 * @send_buf: Not used.
 * @mad_wc: Work completion reporting that a MAD has been received.
 */
static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
				  struct ib_mad_send_buf *send_buf,
				  struct ib_mad_recv_wc *mad_wc)
{
	struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
	struct ib_ah *ah;
	struct ib_mad_send_buf *rsp;
	struct ib_dm_mad *dm_mad;

	if (!mad_wc || !mad_wc->recv_buf.mad)
		return;

	ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
				  mad_wc->recv_buf.grh, mad_agent->port_num);
	if (IS_ERR(ah))
		goto err;

	BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);

	rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
				 mad_wc->wc->pkey_index, 0,
				 IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
				 GFP_KERNEL,
				 IB_MGMT_BASE_VERSION);
	if (IS_ERR(rsp))
		goto err_rsp;

	rsp->ah = ah;

	dm_mad = rsp->mad;
	memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof(*dm_mad));
	dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
	dm_mad->mad_hdr.status = 0;

	switch (mad_wc->recv_buf.mad->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
		srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
		break;
	case IB_MGMT_METHOD_SET:
		dm_mad->mad_hdr.status =
		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
		break;
	default:
		dm_mad->mad_hdr.status =
		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
		break;
	}

	if (!ib_post_send_mad(rsp, NULL)) {
		ib_free_recv_mad(mad_wc);
		/* will destroy_ah & free_send_mad in send completion */
		return;
	}

	ib_free_send_mad(rsp);

err_rsp:
	rdma_destroy_ah(ah);
err:
	ib_free_recv_mad(mad_wc);
}

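/*
 * Format a GUID, e.g. as "0002:c903:000e:8acd", by printing the four
 * big-endian 16-bit groups of the GUID separated by colons.
 */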
static int srpt_format_guid(char *buf, unsigned int size, const __be64 *guid)
{
	const __be16 *g = (const __be16 *)guid;

	return snprintf(buf, size, "%04x:%04x:%04x:%04x",
			be16_to_cpu(g[0]), be16_to_cpu(g[1]),
			be16_to_cpu(g[2]), be16_to_cpu(g[3]));
}

/**
 * srpt_refresh_port - configure a HCA port
 * @sport: SRPT HCA port.
 *
 * Enable InfiniBand management datagram processing, update the cached sm_lid,
 * lid and gid values, and register a callback function for processing MADs
 * on the specified port.
 *
 * Note: It is safe to call this function more than once for the same port.
 */
static int srpt_refresh_port(struct srpt_port *sport)
{
	struct ib_mad_reg_req reg_req;
	struct ib_port_modify port_modify;
	struct ib_port_attr port_attr;
	int ret;

	memset(&port_modify, 0, sizeof(port_modify));
	port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
	port_modify.clr_port_cap_mask = 0;

	ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
	if (ret)
		goto err_mod_port;

	ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
	if (ret)
		goto err_query_port;

	sport->sm_lid = port_attr.sm_lid;
	sport->lid = port_attr.lid;

	ret = rdma_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
	if (ret)
		goto err_query_port;

	sport->port_guid_wwn.priv = sport;
	srpt_format_guid(sport->port_guid, sizeof(sport->port_guid),
			 &sport->gid.global.interface_id);
	sport->port_gid_wwn.priv = sport;
	snprintf(sport->port_gid, sizeof(sport->port_gid),
		 "0x%016llx%016llx",
		 be64_to_cpu(sport->gid.global.subnet_prefix),
		 be64_to_cpu(sport->gid.global.interface_id));

	if (!sport->mad_agent) {
		memset(&reg_req, 0, sizeof(reg_req));
		reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
		reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
		set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
		set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);

		sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
							 sport->port,
							 IB_QPT_GSI,
							 &reg_req, 0,
							 srpt_mad_send_handler,
							 srpt_mad_recv_handler,
							 sport, 0);
		if (IS_ERR(sport->mad_agent)) {
			ret = PTR_ERR(sport->mad_agent);
			sport->mad_agent = NULL;
			goto err_query_port;
		}
	}

	return 0;

err_query_port:

	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
	ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);

err_mod_port:

	return ret;
}

/**
 * srpt_unregister_mad_agent - unregister MAD callback functions
 * @sdev: SRPT HCA pointer.
 *
 * Note: It is safe to call this function more than once for the same device.
 */
static void srpt_unregister_mad_agent(struct srpt_device *sdev)
{
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
	};
	struct srpt_port *sport;
	int i;

	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
		sport = &sdev->port[i - 1];
		WARN_ON(sport->port != i);
		if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
			pr_err("disabling MAD processing failed.\n");
		if (sport->mad_agent) {
			ib_unregister_mad_agent(sport->mad_agent);
			sport->mad_agent = NULL;
		}
	}
}

/**
 * srpt_alloc_ioctx - allocate a SRPT I/O context structure
 * @sdev: SRPT HCA pointer.
 * @ioctx_size: I/O context size.
 * @dma_size: Size of I/O context DMA buffer.
 * @dir: DMA data direction.
 */
static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
					   int ioctx_size, int dma_size,
					   enum dma_data_direction dir)
{
	struct srpt_ioctx *ioctx;

	ioctx = kmalloc(ioctx_size, GFP_KERNEL);
	if (!ioctx)
		goto err;

	ioctx->buf = kmalloc(dma_size, GFP_KERNEL);
	if (!ioctx->buf)
		goto err_free_ioctx;

	ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir);
	if (ib_dma_mapping_error(sdev->device, ioctx->dma))
		goto err_free_buf;

	return ioctx;

err_free_buf:
	kfree(ioctx->buf);
err_free_ioctx:
	kfree(ioctx);
err:
	return NULL;
}

/**
 * srpt_free_ioctx - free a SRPT I/O context structure
 * @sdev: SRPT HCA pointer.
 * @ioctx: I/O context pointer.
 * @dma_size: Size of I/O context DMA buffer.
 * @dir: DMA data direction.
 */
static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
			    int dma_size, enum dma_data_direction dir)
{
	if (!ioctx)
		return;

	ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir);
	kfree(ioctx->buf);
	kfree(ioctx);
}

/**
 * srpt_alloc_ioctx_ring - allocate a ring of SRPT I/O context structures
 * @sdev:       Device to allocate the I/O context ring for.
 * @ring_size:  Number of elements in the I/O context ring.
 * @ioctx_size: I/O context size.
 * @dma_size:   DMA buffer size.
 * @dir:        DMA data direction.
 */
static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
				int ring_size, int ioctx_size,
				int dma_size, enum dma_data_direction dir)
{
	struct srpt_ioctx **ring;
	int i;

	WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
		&& ioctx_size != sizeof(struct srpt_send_ioctx));

	ring = kmalloc_array(ring_size, sizeof(ring[0]), GFP_KERNEL);
	if (!ring)
		goto out;
	for (i = 0; i < ring_size; ++i) {
		ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir);
		if (!ring[i])
			goto err;
		ring[i]->index = i;
	}
	goto out;

err:
	while (--i >= 0)
		srpt_free_ioctx(sdev, ring[i], dma_size, dir);
	kfree(ring);
	ring = NULL;
out:
	return ring;
}

/**
 * srpt_free_ioctx_ring - free the ring of SRPT I/O context structures
 * @ioctx_ring: I/O context ring to be freed.
 * @sdev: SRPT HCA pointer.
 * @ring_size: Number of ring elements.
 * @dma_size: Size of I/O context DMA buffer.
 * @dir: DMA data direction.
 */
static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
				 struct srpt_device *sdev, int ring_size,
				 int dma_size, enum dma_data_direction dir)
{
	int i;

	if (!ioctx_ring)
		return;

	for (i = 0; i < ring_size; ++i)
		srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
	kfree(ioctx_ring);
}

/**
 * srpt_set_cmd_state - set the state of a SCSI command
 * @ioctx: Send I/O context.
 * @new: New I/O context state.
 *
 * Does not modify the state of aborted commands. Returns the previous command
 * state.
 */
static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
						  enum srpt_command_state new)
{
	enum srpt_command_state previous;

	previous = ioctx->state;
	if (previous != SRPT_STATE_DONE)
		ioctx->state = new;

	return previous;
}

/**
 * srpt_test_and_set_cmd_state - test and set the state of a command
 * @ioctx: Send I/O context.
 * @old: Current I/O context state.
 * @new: New I/O context state.
 *
 * Returns true if and only if the previous command state was equal to 'old'.
 */
static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
					enum srpt_command_state old,
					enum srpt_command_state new)
{
	enum srpt_command_state previous;

	WARN_ON(!ioctx);
	WARN_ON(old == SRPT_STATE_DONE);
	WARN_ON(new == SRPT_STATE_NEW);

	previous = ioctx->state;
	if (previous == old)
		ioctx->state = new;

	return previous == old;
}

/**
 * srpt_post_recv - post an IB receive request
 * @sdev: SRPT HCA pointer.
 * @ch: SRPT RDMA channel.
 * @ioctx: Receive I/O context pointer.
 */
static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
			  struct srpt_recv_ioctx *ioctx)
{
	struct ib_sge list;
	struct ib_recv_wr wr, *bad_wr;

	BUG_ON(!sdev);
	list.addr = ioctx->ioctx.dma;
	list.length = srp_max_req_size;
	list.lkey = sdev->lkey;

	ioctx->ioctx.cqe.done = srpt_recv_done;
	wr.wr_cqe = &ioctx->ioctx.cqe;
	wr.next = NULL;
	wr.sg_list = &list;
	wr.num_sge = 1;

	if (sdev->use_srq)
		return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
	else
		return ib_post_recv(ch->qp, &wr, &bad_wr);
}

/**
 * srpt_zerolength_write - perform a zero-length RDMA write
 * @ch: SRPT RDMA channel.
 *
 * A quote from the InfiniBand specification: C9-88: For an HCA responder
 * using Reliable Connection service, for each zero-length RDMA READ or WRITE
 * request, the R_Key shall not be validated, even if the request includes
 * Immediate data.
 */
static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
{
	struct ib_send_wr *bad_wr;
	struct ib_rdma_wr wr = {
		.wr = {
			.next		= NULL,
			{ .wr_cqe	= &ch->zw_cqe, },
			.opcode		= IB_WR_RDMA_WRITE,
			.send_flags	= IB_SEND_SIGNALED,
		}
	};

	pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
		 ch->qp->qp_num);

	return ib_post_send(ch->qp, &wr.wr, &bad_wr);
}

static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_rdma_ch *ch = cq->cq_context;

	pr_debug("%s-%d wc->status %d\n", ch->sess_name, ch->qp->qp_num,
		 wc->status);

	if (wc->status == IB_WC_SUCCESS) {
		srpt_process_wait_list(ch);
	} else {
		if (srpt_set_ch_state(ch, CH_DISCONNECTED))
			schedule_work(&ch->release_work);
		else
			pr_debug("%s-%d: already disconnected.\n",
				 ch->sess_name, ch->qp->qp_num);
	}
}

static int srpt_alloc_rw_ctxs(struct srpt_send_ioctx *ioctx,
		struct srp_direct_buf *db, int nbufs, struct scatterlist **sg,
		unsigned *sg_cnt)
{
	enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
	struct srpt_rdma_ch *ch = ioctx->ch;
	struct scatterlist *prev = NULL;
	unsigned prev_nents;
	int ret, i;

	if (nbufs == 1) {
		ioctx->rw_ctxs = &ioctx->s_rw_ctx;
	} else {
		ioctx->rw_ctxs = kmalloc_array(nbufs, sizeof(*ioctx->rw_ctxs),
			GFP_KERNEL);
		if (!ioctx->rw_ctxs)
			return -ENOMEM;
	}

	for (i = ioctx->n_rw_ctx; i < nbufs; i++, db++) {
		struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
		u64 remote_addr = be64_to_cpu(db->va);
		u32 size = be32_to_cpu(db->len);
		u32 rkey = be32_to_cpu(db->key);

		ret = target_alloc_sgl(&ctx->sg, &ctx->nents, size, false,
				i < nbufs - 1);
		if (ret)
			goto unwind;

		ret = rdma_rw_ctx_init(&ctx->rw, ch->qp, ch->sport->port,
				ctx->sg, ctx->nents, 0, remote_addr, rkey, dir);
		if (ret < 0) {
			target_free_sgl(ctx->sg, ctx->nents);
			goto unwind;
		}

		ioctx->n_rdma += ret;
		ioctx->n_rw_ctx++;

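		/*
		 * Chain this descriptor's scatterlist onto the previous one,
		 * so that the target core sees the buffers of all descriptors
		 * as one single SG list.
		 */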
		if (prev) {
			sg_unmark_end(&prev[prev_nents - 1]);
			sg_chain(prev, prev_nents + 1, ctx->sg);
		} else {
			*sg = ctx->sg;
		}

		prev = ctx->sg;
		prev_nents = ctx->nents;

		*sg_cnt += ctx->nents;
	}

	return 0;

unwind:
	while (--i >= 0) {
		struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];

		rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
				ctx->sg, ctx->nents, dir);
		target_free_sgl(ctx->sg, ctx->nents);
	}
	if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
		kfree(ioctx->rw_ctxs);
	return ret;
}

static void srpt_free_rw_ctxs(struct srpt_rdma_ch *ch,
				    struct srpt_send_ioctx *ioctx)
{
	enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
	int i;

	for (i = 0; i < ioctx->n_rw_ctx; i++) {
		struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];

		rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
				ctx->sg, ctx->nents, dir);
		target_free_sgl(ctx->sg, ctx->nents);
	}

	if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
		kfree(ioctx->rw_ctxs);
}

static inline void *srpt_get_desc_buf(struct srp_cmd *srp_cmd)
{
	/*
	 * The pointer computations below will only be compiled correctly
	 * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
	 * whether srp_cmd::add_data has been declared as a byte pointer.
	 */
	BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0) &&
		     !__same_type(srp_cmd->add_data[0], (u8)0));

	/*
	 * According to the SRP spec, the lower two bits of the 'ADDITIONAL
	 * CDB LENGTH' field are reserved and the size in bytes of this field
	 * is four times the value specified in bits 3..7. Hence the "& ~3".
	 */
	return srp_cmd->add_data + (srp_cmd->add_cdb_len & ~3);
}

/**
 * srpt_get_desc_tbl - parse the data descriptors of a SRP_CMD request
 * @ioctx: Pointer to the I/O context associated with the request.
 * @srp_cmd: Pointer to the SRP_CMD request data.
 * @dir: Pointer to the variable to which the transfer direction will be
 *   written.
 * @sg: [out] scatterlist allocated for the parsed SRP_CMD.
 * @sg_cnt: [out] length of @sg.
 * @data_len: Pointer to the variable to which the total data length of all
 *   descriptors in the SRP_CMD request will be written.
 *
 * This function initializes ioctx->rw_ctxs.
 *
 * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
 * -ENOMEM when memory allocation fails and zero upon success.
 */
static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
		struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
		struct scatterlist **sg, unsigned *sg_cnt, u64 *data_len)
{
	BUG_ON(!dir);
	BUG_ON(!data_len);

	/*
	 * The lower four bits of the buffer format field contain the DATA-IN
	 * buffer descriptor format, and the highest four bits contain the
	 * DATA-OUT buffer descriptor format.
	 */
	if (srp_cmd->buf_fmt & 0xf)
		/* DATA-IN: transfer data from target to initiator (read). */
		*dir = DMA_FROM_DEVICE;
	else if (srp_cmd->buf_fmt >> 4)
		/* DATA-OUT: transfer data from initiator to target (write). */
		*dir = DMA_TO_DEVICE;
	else
		*dir = DMA_NONE;

	/* initialize data_direction early as srpt_alloc_rw_ctxs needs it */
	ioctx->cmd.data_direction = *dir;

	if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
	    ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
		struct srp_direct_buf *db = srpt_get_desc_buf(srp_cmd);

		*data_len = be32_to_cpu(db->len);
		return srpt_alloc_rw_ctxs(ioctx, db, 1, sg, sg_cnt);
	} else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
		   ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
		struct srp_indirect_buf *idb = srpt_get_desc_buf(srp_cmd);
		int nbufs = be32_to_cpu(idb->table_desc.len) /
				sizeof(struct srp_direct_buf);

		if (nbufs >
		    (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
			pr_err("received unsupported SRP_CMD request type (%u out + %u in != %u / %zu)\n",
			       srp_cmd->data_out_desc_cnt,
			       srp_cmd->data_in_desc_cnt,
			       be32_to_cpu(idb->table_desc.len),
			       sizeof(struct srp_direct_buf));
			return -EINVAL;
		}

		*data_len = be32_to_cpu(idb->len);
		return srpt_alloc_rw_ctxs(ioctx, idb->desc_list, nbufs,
				sg, sg_cnt);
	} else {
		*data_len = 0;
		return 0;
	}
}

/**
 * srpt_init_ch_qp - initialize queue pair attributes
 * @ch: SRPT RDMA channel.
 * @qp: Queue pair pointer.
 *
 * Initializes the attributes of queue pair 'qp' by allowing local write,
 * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
 */
static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	WARN_ON_ONCE(ch->using_rdma_cm);

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->qp_state = IB_QPS_INIT;
	attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
	attr->port_num = ch->sport->port;

	ret = ib_find_cached_pkey(ch->sport->sdev->device, ch->sport->port,
				  ch->pkey, &attr->pkey_index);
	if (ret < 0)
		pr_err("Translating pkey %#x failed (%d) - using index 0\n",
		       ch->pkey, ret);

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
			   IB_QP_PKEY_INDEX);

	kfree(attr);
	return ret;
}

/**
 * srpt_ch_qp_rtr - change the state of a channel to 'ready to receive' (RTR)
 * @ch: channel of the queue pair.
 * @qp: queue pair to change the state of.
 *
 * Returns zero upon success and a negative value upon failure.
 *
 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
 * If this structure ever becomes larger, it might be necessary to allocate
 * it dynamically instead of on the stack.
 */
static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int attr_mask;
	int ret;

	WARN_ON_ONCE(ch->using_rdma_cm);

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
	if (ret)
		goto out;

	qp_attr.max_dest_rd_atomic = 4;

	ret = ib_modify_qp(qp, &qp_attr, attr_mask);

out:
	return ret;
}

/**
 * srpt_ch_qp_rts - change the state of a channel to 'ready to send' (RTS)
 * @ch: channel of the queue pair.
 * @qp: queue pair to change the state of.
 *
 * Returns zero upon success and a negative value upon failure.
 *
 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
 * If this structure ever becomes larger, it might be necessary to allocate
 * it dynamically instead of on the stack.
 */
static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int attr_mask;
	int ret;

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
	if (ret)
		goto out;

	qp_attr.max_rd_atomic = 4;

	ret = ib_modify_qp(qp, &qp_attr, attr_mask);

out:
	return ret;
}

/**
 * srpt_ch_qp_err - set the channel queue pair state to 'error'
 * @ch: SRPT RDMA channel.
 */
static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
{
	struct ib_qp_attr qp_attr;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
}

/**
 * srpt_get_send_ioctx - obtain an I/O context for sending to the initiator
 * @ch: SRPT RDMA channel.
 */
static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
{
	struct srpt_send_ioctx *ioctx;
	unsigned long flags;

	BUG_ON(!ch);

	ioctx = NULL;
	spin_lock_irqsave(&ch->spinlock, flags);
	if (!list_empty(&ch->free_list)) {
		ioctx = list_first_entry(&ch->free_list,
					 struct srpt_send_ioctx, free_list);
		list_del(&ioctx->free_list);
	}
	spin_unlock_irqrestore(&ch->spinlock, flags);

	if (!ioctx)
		return ioctx;

	BUG_ON(ioctx->ch != ch);
	ioctx->state = SRPT_STATE_NEW;
	ioctx->n_rdma = 0;
	ioctx->n_rw_ctx = 0;
	ioctx->queue_status_only = false;
	/*
	 * transport_init_se_cmd() does not initialize all fields, so do it
	 * here.
	 */
	memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
	memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));

	return ioctx;
}

/**
 * srpt_abort_cmd - abort a SCSI command
 * @ioctx:   I/O context associated with the SCSI command.
 */
static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
{
	enum srpt_command_state state;

	BUG_ON(!ioctx);

	/*
	 * If the command is in a state where the target core is waiting for
	 * the ib_srpt driver, change the state to the next state.
	 */

	state = ioctx->state;
	switch (state) {
	case SRPT_STATE_NEED_DATA:
		ioctx->state = SRPT_STATE_DATA_IN;
		break;
	case SRPT_STATE_CMD_RSP_SENT:
	case SRPT_STATE_MGMT_RSP_SENT:
		ioctx->state = SRPT_STATE_DONE;
		break;
	default:
		WARN_ONCE(true, "%s: unexpected I/O context state %d\n",
			  __func__, state);
		break;
	}

	pr_debug("Aborting cmd with state %d -> %d and tag %lld\n", state,
		 ioctx->state, ioctx->cmd.tag);

	switch (state) {
	case SRPT_STATE_NEW:
	case SRPT_STATE_DATA_IN:
	case SRPT_STATE_MGMT:
	case SRPT_STATE_DONE:
		/*
		 * Do nothing - defer abort processing until
		 * srpt_queue_response() is invoked.
		 */
		break;
	case SRPT_STATE_NEED_DATA:
		pr_debug("tag %#llx: RDMA read error\n", ioctx->cmd.tag);
		transport_generic_request_failure(&ioctx->cmd,
					TCM_CHECK_CONDITION_ABORT_CMD);
		break;
	case SRPT_STATE_CMD_RSP_SENT:
		/*
		 * SRP_RSP sending failed or the SRP_RSP send completion has
		 * not been received in time.
		 */
		transport_generic_free_cmd(&ioctx->cmd, 0);
		break;
	case SRPT_STATE_MGMT_RSP_SENT:
		transport_generic_free_cmd(&ioctx->cmd, 0);
		break;
	default:
		WARN(1, "Unexpected command state (%d)", state);
		break;
	}

	return state;
}

/**
 * srpt_rdma_read_done - RDMA read completion callback
 * @cq: Completion queue.
 * @wc: Work completion.
 *
 * XXX: what is now target_execute_cmd used to be asynchronous, and unmapping
 * the data that has been transferred via IB RDMA had to be postponed until the
 * check_stop_free() callback.  None of this is necessary anymore and needs to
 * be cleaned up.
 */
static void srpt_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_rdma_ch *ch = cq->cq_context;
	struct srpt_send_ioctx *ioctx =
		container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);

	WARN_ON(ioctx->n_rdma <= 0);
	atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
	ioctx->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		pr_info("RDMA_READ for ioctx 0x%p failed with status %d\n",
			ioctx, wc->status);
		srpt_abort_cmd(ioctx);
		return;
	}

	if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
					SRPT_STATE_DATA_IN))
		target_execute_cmd(&ioctx->cmd);
	else
		pr_err("%s[%d]: wrong state = %d\n", __func__,
		       __LINE__, ioctx->state);
}

/**
 * srpt_build_cmd_rsp - build a SRP_RSP response
 * @ch: RDMA channel through which the request has been received.
 * @ioctx: I/O context associated with the SRP_CMD request. The response will
 *   be built in the buffer ioctx->buf points at and hence this function will
 *   overwrite the request data.
 * @tag: tag of the request for which this response is being generated.
 * @status: value for the STATUS field of the SRP_RSP information unit.
 *
 * Returns the size in bytes of the SRP_RSP response.
 *
 * An SRP_RSP response contains a SCSI status or service response. See also
 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
 * response. See also SPC-2 for more information about sense data.
 */
static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
			      struct srpt_send_ioctx *ioctx, u64 tag,
			      int status)
{
	struct srp_rsp *srp_rsp;
	const u8 *sense_data;
	int sense_data_len, max_sense_len;

	/*
	 * The lowest bit of all SAM-3 status codes is zero (see also
	 * paragraph 5.3 in SAM-3).
	 */
	WARN_ON(status & 1);

	srp_rsp = ioctx->ioctx.buf;
	BUG_ON(!srp_rsp);

	sense_data = ioctx->sense_data;
	sense_data_len = ioctx->cmd.scsi_sense_length;
	WARN_ON(sense_data_len > sizeof(ioctx->sense_data));

	memset(srp_rsp, 0, sizeof(*srp_rsp));
	srp_rsp->opcode = SRP_RSP;
	srp_rsp->req_lim_delta =
		cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
	srp_rsp->tag = tag;
	srp_rsp->status = status;

	if (sense_data_len) {
		BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
		max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
		if (sense_data_len > max_sense_len) {
			pr_warn("truncated sense data from %d to %d bytes\n",
				sense_data_len, max_sense_len);
			sense_data_len = max_sense_len;
		}

		srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
		srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
		memcpy(srp_rsp + 1, sense_data, sense_data_len);
	}

	return sizeof(*srp_rsp) + sense_data_len;
}

/**
 * srpt_build_tskmgmt_rsp - build a task management response
 * @ch:       RDMA channel through which the request has been received.
 * @ioctx:    I/O context in which the SRP_RSP response will be built.
 * @rsp_code: RSP_CODE that will be stored in the response.
 * @tag:      Tag of the request for which this response is being generated.
 *
 * Returns the size in bytes of the SRP_RSP response.
 *
 * An SRP_RSP response contains a SCSI status or service response. See also
 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
 * response.
 */
static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
				  struct srpt_send_ioctx *ioctx,
				  u8 rsp_code, u64 tag)
{
	struct srp_rsp *srp_rsp;
	int resp_data_len;
	int resp_len;

	resp_data_len = 4;
	resp_len = sizeof(*srp_rsp) + resp_data_len;

	srp_rsp = ioctx->ioctx.buf;
	BUG_ON(!srp_rsp);
	memset(srp_rsp, 0, sizeof(*srp_rsp));

	srp_rsp->opcode = SRP_RSP;
	srp_rsp->req_lim_delta =
		cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
	srp_rsp->tag = tag;

	srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
	srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
	srp_rsp->data[3] = rsp_code;

	return resp_len;
}

static int srpt_check_stop_free(struct se_cmd *cmd)
{
	struct srpt_send_ioctx *ioctx = container_of(cmd,
				struct srpt_send_ioctx, cmd);

	return target_put_sess_cmd(&ioctx->cmd);
}

/**
 * srpt_handle_cmd - process a SRP_CMD information unit
 * @ch: SRPT RDMA channel.
 * @recv_ioctx: Receive I/O context.
 * @send_ioctx: Send I/O context.
 */
static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
			    struct srpt_recv_ioctx *recv_ioctx,
			    struct srpt_send_ioctx *send_ioctx)
{
	struct se_cmd *cmd;
	struct srp_cmd *srp_cmd;
	struct scatterlist *sg = NULL;
	unsigned sg_cnt = 0;
	u64 data_len;
	enum dma_data_direction dir;
	int rc;

	BUG_ON(!send_ioctx);

	srp_cmd = recv_ioctx->ioctx.buf;
	cmd = &send_ioctx->cmd;
	cmd->tag = srp_cmd->tag;

	switch (srp_cmd->task_attr) {
	case SRP_CMD_SIMPLE_Q:
		cmd->sam_task_attr = TCM_SIMPLE_TAG;
		break;
	case SRP_CMD_ORDERED_Q:
	default:
		cmd->sam_task_attr = TCM_ORDERED_TAG;
		break;
	case SRP_CMD_HEAD_OF_Q:
		cmd->sam_task_attr = TCM_HEAD_TAG;
		break;
	case SRP_CMD_ACA:
		cmd->sam_task_attr = TCM_ACA_TAG;
		break;
	}

	rc = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &sg, &sg_cnt,
			&data_len);
	if (rc) {
		if (rc != -EAGAIN) {
			pr_err("0x%llx: parsing SRP descriptor table failed.\n",
			       srp_cmd->tag);
		}
		goto release_ioctx;
	}

	rc = target_submit_cmd_map_sgls(cmd, ch->sess, srp_cmd->cdb,
			       &send_ioctx->sense_data[0],
			       scsilun_to_int(&srp_cmd->lun), data_len,
			       TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF,
			       sg, sg_cnt, NULL, 0, NULL, 0);
	if (rc != 0) {
		pr_debug("target_submit_cmd() returned %d for tag %#llx\n", rc,
			 srp_cmd->tag);
		goto release_ioctx;
	}
	return;

release_ioctx:
	send_ioctx->state = SRPT_STATE_DONE;
	srpt_release_cmd(cmd);
}

static int srp_tmr_to_tcm(int fn)
{
	switch (fn) {
	case SRP_TSK_ABORT_TASK:
		return TMR_ABORT_TASK;
	case SRP_TSK_ABORT_TASK_SET:
		return TMR_ABORT_TASK_SET;
	case SRP_TSK_CLEAR_TASK_SET:
		return TMR_CLEAR_TASK_SET;
	case SRP_TSK_LUN_RESET:
		return TMR_LUN_RESET;
	case SRP_TSK_CLEAR_ACA:
		return TMR_CLEAR_ACA;
	default:
		return -1;
	}
}

/**
 * srpt_handle_tsk_mgmt - process a SRP_TSK_MGMT information unit
 * @ch: SRPT RDMA channel.
 * @recv_ioctx: Receive I/O context.
 * @send_ioctx: Send I/O context.
 *
 * For more information about SRP_TSK_MGMT information units, see also section
 * 6.7 in the SRP r16a document.
 */
static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
				 struct srpt_recv_ioctx *recv_ioctx,
				 struct srpt_send_ioctx *send_ioctx)
{
	struct srp_tsk_mgmt *srp_tsk;
	struct se_cmd *cmd;
	struct se_session *sess = ch->sess;
	int tcm_tmr;
	int rc;

	BUG_ON(!send_ioctx);

	srp_tsk = recv_ioctx->ioctx.buf;
	cmd = &send_ioctx->cmd;

	pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld ch %p sess %p\n",
		 srp_tsk->tsk_mgmt_func, srp_tsk->task_tag, srp_tsk->tag, ch,
		 ch->sess);

	srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
	send_ioctx->cmd.tag = srp_tsk->tag;
	tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
	rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL,
			       scsilun_to_int(&srp_tsk->lun), srp_tsk, tcm_tmr,
			       GFP_KERNEL, srp_tsk->task_tag,
			       TARGET_SCF_ACK_KREF);
	if (rc != 0) {
		send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
		goto fail;
	}
	return;
fail:
	transport_send_check_condition_and_sense(cmd, 0, 0); /* XXX */
}

/**
 * srpt_handle_new_iu - process a newly received information unit
 * @ch:    RDMA channel through which the information unit has been received.
 * @recv_ioctx: Receive I/O context associated with the information unit.
 */
static bool
srpt_handle_new_iu(struct srpt_rdma_ch *ch, struct srpt_recv_ioctx *recv_ioctx)
{
	struct srpt_send_ioctx *send_ioctx = NULL;
	struct srp_cmd *srp_cmd;
	bool res = false;
	u8 opcode;

	BUG_ON(!ch);
	BUG_ON(!recv_ioctx);

	if (unlikely(ch->state == CH_CONNECTING))
		goto push;

	ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
				   recv_ioctx->ioctx.dma, srp_max_req_size,
				   DMA_FROM_DEVICE);

	srp_cmd = recv_ioctx->ioctx.buf;
	opcode = srp_cmd->opcode;
	if (opcode == SRP_CMD || opcode == SRP_TSK_MGMT) {
		send_ioctx = srpt_get_send_ioctx(ch);
		if (unlikely(!send_ioctx))
			goto push;
	}

	if (!list_empty(&recv_ioctx->wait_list)) {
		WARN_ON_ONCE(!ch->processing_wait_list);
		list_del_init(&recv_ioctx->wait_list);
	}

	switch (opcode) {
	case SRP_CMD:
		srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
		break;
	case SRP_TSK_MGMT:
		srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
		break;
	case SRP_I_LOGOUT:
		pr_err("Not yet implemented: SRP_I_LOGOUT\n");
		break;
	case SRP_CRED_RSP:
		pr_debug("received SRP_CRED_RSP\n");
		break;
	case SRP_AER_RSP:
		pr_debug("received SRP_AER_RSP\n");
		break;
	case SRP_RSP:
		pr_err("Received SRP_RSP\n");
		break;
	default:
		pr_err("received IU with unknown opcode 0x%x\n", opcode);
		break;
	}

	srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
	res = true;

out:
	return res;

push:
	if (list_empty(&recv_ioctx->wait_list)) {
		WARN_ON_ONCE(ch->processing_wait_list);
		list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
	}
	goto out;
}

static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_rdma_ch *ch = cq->cq_context;
	struct srpt_recv_ioctx *ioctx =
		container_of(wc->wr_cqe, struct srpt_recv_ioctx, ioctx.cqe);

	if (wc->status == IB_WC_SUCCESS) {
		int req_lim;

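		/*
		 * Each information unit received from the initiator consumes
		 * one SRP credit; a negative req_lim means that the initiator
		 * sent more requests than it had been granted credits for.
		 */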
		req_lim = atomic_dec_return(&ch->req_lim);
		if (unlikely(req_lim < 0))
			pr_err("req_lim = %d < 0\n", req_lim);
		srpt_handle_new_iu(ch, ioctx);
	} else {
		pr_info_ratelimited("receiving failed for ioctx %p with status %d\n",
				    ioctx, wc->status);
	}
}

/*
 * This function must be called from the context in which RDMA completions are
 * processed because it accesses the wait list without protection against
 * access from other threads.
 */
static void srpt_process_wait_list(struct srpt_rdma_ch *ch)
{
	struct srpt_recv_ioctx *recv_ioctx, *tmp;

	WARN_ON_ONCE(ch->state == CH_CONNECTING);

	if (list_empty(&ch->cmd_wait_list))
		return;

	WARN_ON_ONCE(ch->processing_wait_list);
	ch->processing_wait_list = true;
	list_for_each_entry_safe(recv_ioctx, tmp, &ch->cmd_wait_list,
				 wait_list) {
		if (!srpt_handle_new_iu(ch, recv_ioctx))
			break;
	}
	ch->processing_wait_list = false;
}

/**
 * srpt_send_done - send completion callback
 * @cq: Completion queue.
 * @wc: Work completion.
 *
 * Note: Although this has not yet been observed during tests, at least in
 * theory it is possible that the srpt_get_send_ioctx() call invoked by
 * srpt_handle_new_iu() fails. This is possible because the req_lim_delta
 * value in each response is set to one, and it is possible that this response
 * makes the initiator send a new request before the send completion for that
 * response has been processed. This could e.g. happen if the call to
 * srpt_put_send_ioctx() is delayed because of a higher priority interrupt or
 * if IB retransmission causes generation of the send completion to be
 * delayed. Incoming information units for which srpt_get_send_ioctx() fails
 * are queued on cmd_wait_list. The code below processes these delayed
 * requests one at a time.
 */
static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_rdma_ch *ch = cq->cq_context;
	struct srpt_send_ioctx *ioctx =
		container_of(wc->wr_cqe, struct srpt_send_ioctx, ioctx.cqe);
	enum srpt_command_state state;

	state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);

	WARN_ON(state != SRPT_STATE_CMD_RSP_SENT &&
		state != SRPT_STATE_MGMT_RSP_SENT);

	atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);

	if (wc->status != IB_WC_SUCCESS)
		pr_info("sending response for ioctx 0x%p failed"
			" with status %d\n", ioctx, wc->status);

	if (state != SRPT_STATE_DONE) {
		transport_generic_free_cmd(&ioctx->cmd, 0);
	} else {
		pr_err("IB completion has been received too late for"
		       " wr_id = %u.\n", ioctx->ioctx.index);
	}

	srpt_process_wait_list(ch);
}

/**
 * srpt_create_ch_ib - create receive and send completion queues
 * @ch: SRPT RDMA channel.
 */
static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
{
	struct ib_qp_init_attr *qp_init;
	struct srpt_port *sport = ch->sport;
	struct srpt_device *sdev = sport->sdev;
	const struct ib_device_attr *attrs = &sdev->device->attrs;
	int sq_size = sport->port_attrib.srp_sq_size;
	int i, ret;

	WARN_ON(ch->rq_size < 1);

	ret = -ENOMEM;
	qp_init = kzalloc(sizeof(*qp_init), GFP_KERNEL);
	if (!qp_init)
		goto out;

retry:
	ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + sq_size,
			0 /* XXX: spread CQs */, IB_POLL_WORKQUEUE);
	if (IS_ERR(ch->cq)) {
		ret = PTR_ERR(ch->cq);
		pr_err("failed to create CQ cqe= %d ret= %d\n",
		       ch->rq_size + sq_size, ret);
		goto out;
	}

	qp_init->qp_context = (void *)ch;
	qp_init->event_handler
		= (void(*)(struct ib_event *, void*))srpt_qp_event;
	qp_init->send_cq = ch->cq;
	qp_init->recv_cq = ch->cq;
	qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_init->qp_type = IB_QPT_RC;
	/*
	 * We divide up our send queue size into half SEND WRs to send the
	 * completions, and half R/W contexts to actually do the RDMA
	 * READ/WRITE transfers.  Note that we need to allocate CQ slots for
	 * both, as RDMA contexts will also post completions for the
	 * RDMA READ case.
	 */
1754 1755
	qp_init->cap.max_send_wr = min(sq_size / 2, attrs->max_qp_wr);
	qp_init->cap.max_rdma_ctxs = sq_size / 2;
1756 1757
	qp_init->cap.max_send_sge = min(attrs->max_send_sge,
					SRPT_MAX_SG_PER_WQE);
1758
	qp_init->port_num = ch->sport->port;
1759 1760 1761 1762
	if (sdev->use_srq) {
		qp_init->srq = sdev->srq;
	} else {
		qp_init->cap.max_recv_wr = ch->rq_size;
1763 1764
		qp_init->cap.max_recv_sge = min(attrs->max_recv_sge,
						SRPT_MAX_SG_PER_WQE);
1765
	}
1766

B
Bart Van Assche 已提交
1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792
	if (ch->using_rdma_cm) {
		ret = rdma_create_qp(ch->rdma_cm.cm_id, sdev->pd, qp_init);
		ch->qp = ch->rdma_cm.cm_id->qp;
	} else {
		ch->qp = ib_create_qp(sdev->pd, qp_init);
		if (!IS_ERR(ch->qp)) {
			ret = srpt_init_ch_qp(ch, ch->qp);
			if (ret)
				ib_destroy_qp(ch->qp);
		} else {
			ret = PTR_ERR(ch->qp);
		}
	}
	if (ret) {
		bool retry = sq_size > MIN_SRPT_SQ_SIZE;

		if (retry) {
			pr_debug("failed to create queue pair with sq_size = %d (%d) - retrying\n",
				 sq_size, ret);
			ib_free_cq(ch->cq);
			sq_size = max(sq_size / 2, MIN_SRPT_SQ_SIZE);
			goto retry;
		} else {
			pr_err("failed to create queue pair with sq_size = %d (%d)\n",
			       sq_size, ret);
			goto err_destroy_cq;
1793
		}
1794 1795 1796 1797
	}

	atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);

1798
	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d ch= %p\n",
1799
		 __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
1800
		 qp_init->cap.max_send_wr, ch);
1801

1802 1803 1804 1805
	if (!sdev->use_srq)
		for (i = 0; i < ch->rq_size; i++)
			srpt_post_recv(sdev, ch, ch->ioctx_recv_ring[i]);

1806 1807 1808 1809 1810
out:
	kfree(qp_init);
	return ret;

err_destroy_cq:
B
Bart Van Assche 已提交
1811
	ch->qp = NULL;
1812
	ib_free_cq(ch->cq);
1813 1814 1815 1816 1817 1818
	goto out;
}
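
/*
 * Added commentary (not in the original source): if queue pair creation
 * fails and sq_size is still above MIN_SRPT_SQ_SIZE, the retry path above
 * frees the CQ and halves the send queue size before jumping back to the
 * retry label, e.g. 4096 -> 2048 -> 1024 -> ... until either the allocation
 * succeeds or sq_size bottoms out at MIN_SRPT_SQ_SIZE.
 */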

static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
{
	ib_destroy_qp(ch->qp);
	ib_free_cq(ch->cq);
}

/**
 * srpt_close_ch - close an RDMA channel
 * @ch: SRPT RDMA channel.
 *
 * Make sure all resources associated with the channel will be deallocated at
 * an appropriate time.
 *
 * Returns true if and only if the channel state has been modified into
 * CH_DRAINING.
 */
static bool srpt_close_ch(struct srpt_rdma_ch *ch)
{
	int ret;

	if (!srpt_set_ch_state(ch, CH_DRAINING)) {
		pr_debug("%s-%d: already closed\n", ch->sess_name,
			 ch->qp->qp_num);
		return false;
	}

	kref_get(&ch->kref);

	ret = srpt_ch_qp_err(ch);
	if (ret < 0)
		pr_err("%s-%d: changing queue pair into error state failed: %d\n",
		       ch->sess_name, ch->qp->qp_num, ret);

	ret = srpt_zerolength_write(ch);
	if (ret < 0) {
		pr_err("%s-%d: queuing zero-length write failed: %d\n",
		       ch->sess_name, ch->qp->qp_num, ret);
		if (srpt_set_ch_state(ch, CH_DISCONNECTED))
			schedule_work(&ch->release_work);
		else
			WARN_ON_ONCE(true);
	}

	kref_put(&ch->kref, srpt_free_ch);

	return true;
}
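
/*
 * Added commentary: the kref_get()/kref_put() pair in srpt_close_ch() keeps
 * the channel alive across srpt_ch_qp_err() and srpt_zerolength_write();
 * scheduling ch->release_work in the error path could otherwise drop the
 * last reference while this function still dereferences ch.
 */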

/*
 * Change the channel state into CH_DISCONNECTING. If a channel has not yet
 * reached the connected state, close it. If a channel is in the connected
 * state, send a DREQ. If a DREQ has been received, send a DREP. Note: it is
 * the responsibility of the caller to ensure that this function is not
 * invoked concurrently with the code that accepts a connection. This means
 * that this function must either be invoked from inside a CM callback
 * function or that it must be invoked with the srpt_port.mutex held.
 */
static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
{
	int ret;

	if (!srpt_set_ch_state(ch, CH_DISCONNECTING))
		return -ENOTCONN;

	if (ch->using_rdma_cm) {
		ret = rdma_disconnect(ch->rdma_cm.cm_id);
	} else {
		ret = ib_send_cm_dreq(ch->ib_cm.cm_id, NULL, 0);
		if (ret < 0)
			ret = ib_send_cm_drep(ch->ib_cm.cm_id, NULL, 0);
	}

	if (ret < 0 && srpt_close_ch(ch))
		ret = 0;

	return ret;
}
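
/*
 * Added commentary: per the comment above, ib_send_cm_dreq() fails when the
 * initiator has already initiated the disconnect; the ib_send_cm_drep()
 * fallback then answers the initiator's DREQ instead of sending a new DREQ.
 */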

static bool srpt_ch_closed(struct srpt_port *sport, struct srpt_rdma_ch *ch)
{
	struct srpt_nexus *nexus;
	struct srpt_rdma_ch *ch2;
	bool res = true;

	rcu_read_lock();
	list_for_each_entry(nexus, &sport->nexus_list, entry) {
		list_for_each_entry(ch2, &nexus->ch_list, list) {
			if (ch2 == ch) {
				res = false;
				goto done;
			}
		}
	}
done:
	rcu_read_unlock();

	return res;
}

/* Send DREQ and wait for DREP. */
static void srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch)
{
	struct srpt_port *sport = ch->sport;

	pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
		 ch->state);

	mutex_lock(&sport->mutex);
	srpt_disconnect_ch(ch);
	mutex_unlock(&sport->mutex);

	while (wait_event_timeout(sport->ch_releaseQ, srpt_ch_closed(sport, ch),
				  5 * HZ) == 0)
		pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
			ch->sess_name, ch->qp->qp_num, ch->state);
}

static void __srpt_close_all_ch(struct srpt_port *sport)
{
	struct srpt_nexus *nexus;
	struct srpt_rdma_ch *ch;

	lockdep_assert_held(&sport->mutex);

	list_for_each_entry(nexus, &sport->nexus_list, entry) {
		list_for_each_entry(ch, &nexus->ch_list, list) {
			if (srpt_disconnect_ch(ch) >= 0)
				pr_info("Closing channel %s-%d because target %s_%d has been disabled\n",
					ch->sess_name, ch->qp->qp_num,
					sport->sdev->device->name, sport->port);
			srpt_close_ch(ch);
		}
	}
}

/*
 * Look up (i_port_id, t_port_id) in sport->nexus_list. Create an entry if
 * it does not yet exist.
 */
static struct srpt_nexus *srpt_get_nexus(struct srpt_port *sport,
					 const u8 i_port_id[16],
					 const u8 t_port_id[16])
{
	struct srpt_nexus *nexus = NULL, *tmp_nexus = NULL, *n;

	for (;;) {
		mutex_lock(&sport->mutex);
		list_for_each_entry(n, &sport->nexus_list, entry) {
			if (memcmp(n->i_port_id, i_port_id, 16) == 0 &&
			    memcmp(n->t_port_id, t_port_id, 16) == 0) {
				nexus = n;
				break;
			}
		}
		if (!nexus && tmp_nexus) {
			list_add_tail_rcu(&tmp_nexus->entry,
					  &sport->nexus_list);
			swap(nexus, tmp_nexus);
		}
		mutex_unlock(&sport->mutex);

		if (nexus)
			break;
		tmp_nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
		if (!tmp_nexus) {
			nexus = ERR_PTR(-ENOMEM);
			break;
		}
		INIT_LIST_HEAD(&tmp_nexus->ch_list);
		memcpy(tmp_nexus->i_port_id, i_port_id, 16);
		memcpy(tmp_nexus->t_port_id, t_port_id, 16);
	}

	kfree(tmp_nexus);

	return nexus;
}
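
/*
 * Added commentary: srpt_get_nexus() allocates tmp_nexus outside the
 * sport->mutex critical section and re-scans the list after reacquiring the
 * mutex, so a concurrent caller that inserted the same nexus first simply
 * wins; swap() clears tmp_nexus on insertion, which makes the final kfree()
 * a no-op in that case.
 */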

static void srpt_set_enabled(struct srpt_port *sport, bool enabled)
	__must_hold(&sport->mutex)
{
	lockdep_assert_held(&sport->mutex);

	if (sport->enabled == enabled)
		return;
	sport->enabled = enabled;
	if (!enabled)
		__srpt_close_all_ch(sport);
}

static void srpt_free_ch(struct kref *kref)
{
	struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);

	kfree_rcu(ch, rcu);
}

static void srpt_release_channel_work(struct work_struct *w)
{
	struct srpt_rdma_ch *ch;
	struct srpt_device *sdev;
	struct srpt_port *sport;
	struct se_session *se_sess;

	ch = container_of(w, struct srpt_rdma_ch, release_work);
	pr_debug("%s-%d\n", ch->sess_name, ch->qp->qp_num);

	sdev = ch->sport->sdev;
	BUG_ON(!sdev);

	se_sess = ch->sess;
	BUG_ON(!se_sess);

	target_sess_cmd_list_set_waiting(se_sess);
	target_wait_for_sess_cmds(se_sess);

	transport_deregister_session_configfs(se_sess);
	transport_deregister_session(se_sess);
	ch->sess = NULL;

	if (ch->using_rdma_cm)
		rdma_destroy_id(ch->rdma_cm.cm_id);
	else
		ib_destroy_cm_id(ch->ib_cm.cm_id);

	srpt_destroy_ch_ib(ch);

	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
			     ch->sport->sdev, ch->rq_size,
			     ch->max_rsp_size, DMA_TO_DEVICE);

	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
			     sdev, ch->rq_size,
			     srp_max_req_size, DMA_FROM_DEVICE);

	sport = ch->sport;
	mutex_lock(&sport->mutex);
	list_del_rcu(&ch->list);
	mutex_unlock(&sport->mutex);

	wake_up(&sport->ch_releaseQ);

	kref_put(&ch->kref, srpt_free_ch);
}

/**
 * srpt_cm_req_recv - process the event IB_CM_REQ_RECEIVED
 * @sdev: HCA through which the login request was received.
 * @ib_cm_id: IB/CM connection identifier in case of IB/CM.
 * @rdma_cm_id: RDMA/CM connection identifier in case of RDMA/CM.
 * @port_num: Port through which the REQ message was received.
 * @pkey: P_Key of the incoming connection.
 * @req: SRP login request.
 * @src_addr: GID (IB/CM) or IP address (RDMA/CM) of the port that submitted
 * the login request.
 *
 * Ownership of the cm_id is transferred to the target session if this
 * function returns zero. Otherwise the caller remains the owner of cm_id.
 */
static int srpt_cm_req_recv(struct srpt_device *const sdev,
			    struct ib_cm_id *ib_cm_id,
			    struct rdma_cm_id *rdma_cm_id,
			    u8 port_num, __be16 pkey,
			    const struct srp_login_req *req,
			    const char *src_addr)
{
	struct srpt_port *sport = &sdev->port[port_num - 1];
	struct srpt_nexus *nexus;
	struct srp_login_rsp *rsp = NULL;
	struct srp_login_rej *rej = NULL;
	union {
		struct rdma_conn_param rdma_cm;
		struct ib_cm_rep_param ib_cm;
	} *rep_param = NULL;
	struct srpt_rdma_ch *ch;
	char i_port_id[36];
	u32 it_iu_len;
	int i, ret;

	WARN_ON_ONCE(irqs_disabled());

	if (WARN_ON(!sdev || !req))
		return -EINVAL;

	it_iu_len = be32_to_cpu(req->req_it_iu_len);

	pr_info("Received SRP_LOGIN_REQ with i_port_id %pI6, t_port_id %pI6 and it_iu_len %d on port %d (guid=%pI6); pkey %#04x\n",
		req->initiator_port_id, req->target_port_id, it_iu_len,
		port_num, &sport->gid, be16_to_cpu(pkey));

	nexus = srpt_get_nexus(sport, req->initiator_port_id,
			       req->target_port_id);
	if (IS_ERR(nexus)) {
		ret = PTR_ERR(nexus);
		goto out;
	}

	ret = -ENOMEM;
	rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
	rej = kzalloc(sizeof(*rej), GFP_KERNEL);
	rep_param = kzalloc(sizeof(*rep_param), GFP_KERNEL);
	if (!rsp || !rej || !rep_param)
		goto out;

	ret = -EINVAL;
	if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
		rej->reason = cpu_to_be32(
				SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
		pr_err("rejected SRP_LOGIN_REQ because its length (%d bytes) is out of range (%d .. %d)\n",
		       it_iu_len, 64, srp_max_req_size);
		goto reject;
	}

	if (!sport->enabled) {
		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_info("rejected SRP_LOGIN_REQ because target port %s_%d has not yet been enabled\n",
			sport->sdev->device->name, port_num);
		goto reject;
	}

	if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
	    || *(__be64 *)(req->target_port_id + 8) !=
	       cpu_to_be64(srpt_service_guid)) {
		rej->reason = cpu_to_be32(
				SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
		pr_err("rejected SRP_LOGIN_REQ because it has an invalid target port identifier.\n");
		goto reject;
	}

	ret = -ENOMEM;
	ch = kzalloc(sizeof(*ch), GFP_KERNEL);
	if (!ch) {
		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_err("rejected SRP_LOGIN_REQ because out of memory.\n");
		goto reject;
	}

	kref_init(&ch->kref);
	ch->pkey = be16_to_cpu(pkey);
	ch->nexus = nexus;
	ch->zw_cqe.done = srpt_zerolength_write_done;
	INIT_WORK(&ch->release_work, srpt_release_channel_work);
	ch->sport = sport;
	if (ib_cm_id) {
		ch->ib_cm.cm_id = ib_cm_id;
		ib_cm_id->context = ch;
	} else {
		ch->using_rdma_cm = true;
		ch->rdma_cm.cm_id = rdma_cm_id;
		rdma_cm_id->context = ch;
	}
	/*
	 * ch->rq_size should be at least as large as the initiator queue
	 * depth so that the initiator driver does not have to report
	 * QUEUE_FULL to the SCSI mid-layer.
	 */
	ch->rq_size = min(MAX_SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr);
	spin_lock_init(&ch->spinlock);
	ch->state = CH_CONNECTING;
	INIT_LIST_HEAD(&ch->cmd_wait_list);
	ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;

	ch->ioctx_ring = (struct srpt_send_ioctx **)
		srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
				      sizeof(*ch->ioctx_ring[0]),
				      ch->max_rsp_size, DMA_TO_DEVICE);
	if (!ch->ioctx_ring) {
		pr_err("rejected SRP_LOGIN_REQ because creating a new QP SQ ring failed.\n");
		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		goto free_ch;
	}

	INIT_LIST_HEAD(&ch->free_list);
	for (i = 0; i < ch->rq_size; i++) {
		ch->ioctx_ring[i]->ch = ch;
		list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
	}
	if (!sdev->use_srq) {
		ch->ioctx_recv_ring = (struct srpt_recv_ioctx **)
			srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
					      sizeof(*ch->ioctx_recv_ring[0]),
					      srp_max_req_size,
					      DMA_FROM_DEVICE);
		if (!ch->ioctx_recv_ring) {
			pr_err("rejected SRP_LOGIN_REQ because creating a new QP RQ ring failed.\n");
			rej->reason =
			    cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
			goto free_ring;
		}
		for (i = 0; i < ch->rq_size; i++)
			INIT_LIST_HEAD(&ch->ioctx_recv_ring[i]->wait_list);
	}

	ret = srpt_create_ch_ib(ch);
	if (ret) {
		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_err("rejected SRP_LOGIN_REQ because creating a new RDMA channel failed.\n");
		goto free_recv_ring;
	}

	strlcpy(ch->sess_name, src_addr, sizeof(ch->sess_name));
	snprintf(i_port_id, sizeof(i_port_id), "0x%016llx%016llx",
			be64_to_cpu(*(__be64 *)nexus->i_port_id),
			be64_to_cpu(*(__be64 *)(nexus->i_port_id + 8)));

	pr_debug("registering session %s\n", ch->sess_name);

	if (sport->port_guid_tpg.se_tpg_wwn)
		ch->sess = target_alloc_session(&sport->port_guid_tpg, 0, 0,
						TARGET_PROT_NORMAL,
						ch->sess_name, ch, NULL);
	if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
		ch->sess = target_alloc_session(&sport->port_gid_tpg, 0, 0,
					TARGET_PROT_NORMAL, i_port_id, ch,
					NULL);
	/* Retry without leading "0x" */
	if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
		ch->sess = target_alloc_session(&sport->port_gid_tpg, 0, 0,
						TARGET_PROT_NORMAL,
						i_port_id + 2, ch, NULL);
	if (IS_ERR_OR_NULL(ch->sess)) {
		ret = PTR_ERR(ch->sess);
		pr_info("Rejected login for initiator %s: ret = %d.\n",
			ch->sess_name, ret);
		rej->reason = cpu_to_be32(ret == -ENOMEM ?
				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
				SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
		goto reject;
	}

	mutex_lock(&sport->mutex);

	if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
		struct srpt_rdma_ch *ch2;

		rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;

		list_for_each_entry(ch2, &nexus->ch_list, list) {
			if (srpt_disconnect_ch(ch2) < 0)
				continue;
			pr_info("Relogin - closed existing channel %s\n",
				ch2->sess_name);
			rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
		}
	} else {
		rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
	}

	list_add_tail_rcu(&ch->list, &nexus->ch_list);

	if (!sport->enabled) {
		rej->reason = cpu_to_be32(
				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
			sdev->device->name, port_num);
		mutex_unlock(&sport->mutex);
		goto reject;
	}

	mutex_unlock(&sport->mutex);

	ret = ch->using_rdma_cm ? 0 : srpt_ch_qp_rtr(ch, ch->qp);
	if (ret) {
		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_err("rejected SRP_LOGIN_REQ because enabling RTR failed (error code = %d)\n",
		       ret);
		goto destroy_ib;
	}

	pr_debug("Establish connection sess=%p name=%s ch=%p\n", ch->sess,
		 ch->sess_name, ch);

	/* create srp_login_response */
	rsp->opcode = SRP_LOGIN_RSP;
	rsp->tag = req->tag;
	rsp->max_it_iu_len = req->req_it_iu_len;
	rsp->max_ti_iu_len = req->req_it_iu_len;
	ch->max_ti_iu_len = it_iu_len;
	rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
				   SRP_BUF_FORMAT_INDIRECT);
	rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
	atomic_set(&ch->req_lim, ch->rq_size);
	atomic_set(&ch->req_lim_delta, 0);

	/* create cm reply */
	if (ch->using_rdma_cm) {
		rep_param->rdma_cm.private_data = (void *)rsp;
		rep_param->rdma_cm.private_data_len = sizeof(*rsp);
		rep_param->rdma_cm.rnr_retry_count = 7;
		rep_param->rdma_cm.flow_control = 1;
		rep_param->rdma_cm.responder_resources = 4;
		rep_param->rdma_cm.initiator_depth = 4;
	} else {
		rep_param->ib_cm.qp_num = ch->qp->qp_num;
		rep_param->ib_cm.private_data = (void *)rsp;
		rep_param->ib_cm.private_data_len = sizeof(*rsp);
		rep_param->ib_cm.rnr_retry_count = 7;
		rep_param->ib_cm.flow_control = 1;
		rep_param->ib_cm.failover_accepted = 0;
		rep_param->ib_cm.srq = 1;
		rep_param->ib_cm.responder_resources = 4;
		rep_param->ib_cm.initiator_depth = 4;
	}

	/*
	 * Hold the sport mutex while accepting a connection to prevent
	 * srpt_disconnect_ch() from being invoked concurrently with this
	 * code.
	 */
	mutex_lock(&sport->mutex);
	if (sport->enabled && ch->state == CH_CONNECTING) {
		if (ch->using_rdma_cm)
			ret = rdma_accept(rdma_cm_id, &rep_param->rdma_cm);
		else
			ret = ib_send_cm_rep(ib_cm_id, &rep_param->ib_cm);
	} else {
		ret = -EINVAL;
	}
	mutex_unlock(&sport->mutex);

	switch (ret) {
	case 0:
		break;
	case -EINVAL:
		goto reject;
	default:
		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_err("sending SRP_LOGIN_REQ response failed (error code = %d)\n",
		       ret);
		goto reject;
	}

	goto out;

destroy_ib:
	srpt_destroy_ch_ib(ch);

free_recv_ring:
	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
			     ch->sport->sdev, ch->rq_size,
			     srp_max_req_size, DMA_FROM_DEVICE);

free_ring:
	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
			     ch->sport->sdev, ch->rq_size,
			     ch->max_rsp_size, DMA_TO_DEVICE);

free_ch:
	if (ib_cm_id)
		ib_cm_id->context = NULL;
	kfree(ch);
	ch = NULL;

	WARN_ON_ONCE(ret == 0);

reject:
	pr_info("Rejecting login with reason %#x\n", be32_to_cpu(rej->reason));
	rej->opcode = SRP_LOGIN_REJ;
	rej->tag = req->tag;
	rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
				   SRP_BUF_FORMAT_INDIRECT);

	if (rdma_cm_id)
		rdma_reject(rdma_cm_id, rej, sizeof(*rej));
	else
		ib_send_cm_rej(ib_cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
			       rej, sizeof(*rej));

out:
	kfree(rep_param);
	kfree(rsp);
	kfree(rej);

	return ret;
}

static int srpt_ib_cm_req_recv(struct ib_cm_id *cm_id,
			       struct ib_cm_req_event_param *param,
			       void *private_data)
{
	char sguid[40];

	srpt_format_guid(sguid, sizeof(sguid),
			 &param->primary_path->dgid.global.interface_id);

	return srpt_cm_req_recv(cm_id->context, cm_id, NULL, param->port,
				param->primary_path->pkey,
				private_data, sguid);
}

static int srpt_rdma_cm_req_recv(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event)
{
	struct srpt_device *sdev;
	struct srp_login_req req;
	const struct srp_login_req_rdma *req_rdma;
	char src_addr[40];

	sdev = ib_get_client_data(cm_id->device, &srpt_client);
	if (!sdev)
		return -ECONNREFUSED;

	if (event->param.conn.private_data_len < sizeof(*req_rdma))
		return -EINVAL;

	/* Transform srp_login_req_rdma into srp_login_req. */
	req_rdma = event->param.conn.private_data;
	memset(&req, 0, sizeof(req));
	req.opcode		= req_rdma->opcode;
	req.tag			= req_rdma->tag;
	req.req_it_iu_len	= req_rdma->req_it_iu_len;
	req.req_buf_fmt		= req_rdma->req_buf_fmt;
	req.req_flags		= req_rdma->req_flags;
	memcpy(req.initiator_port_id, req_rdma->initiator_port_id, 16);
	memcpy(req.target_port_id, req_rdma->target_port_id, 16);

	snprintf(src_addr, sizeof(src_addr), "%pIS",
		 &cm_id->route.addr.src_addr);

	return srpt_cm_req_recv(sdev, NULL, cm_id, cm_id->port_num,
				cm_id->route.path_rec->pkey, &req, src_addr);
}

static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
			     enum ib_cm_rej_reason reason,
			     const u8 *private_data,
			     u8 private_data_len)
{
	char *priv = NULL;
	int i;

	if (private_data_len && (priv = kmalloc(private_data_len * 3 + 1,
						GFP_KERNEL))) {
		for (i = 0; i < private_data_len; i++)
			sprintf(priv + 3 * i, " %02x", private_data[i]);
	}
	pr_info("Received CM REJ for ch %s-%d; reason %d%s%s.\n",
		ch->sess_name, ch->qp->qp_num, reason, private_data_len ?
		"; private data" : "", priv ? priv : " (?)");
	kfree(priv);
}
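
/*
 * Illustrative example (added commentary): for private_data = { 0x12, 0x34 }
 * the loop above formats priv as " 12 34"; three output bytes per input
 * byte plus the terminating '\0' explain the kmalloc() size of
 * private_data_len * 3 + 1.
 */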

/**
 * srpt_cm_rtu_recv - process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event
 * @ch: SRPT RDMA channel.
 *
 * An RTU (ready to use) message indicates that the connection has been
 * established and that the recipient may begin transmitting.
 */
static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch)
{
	int ret;

	ret = ch->using_rdma_cm ? 0 : srpt_ch_qp_rts(ch, ch->qp);
	if (ret < 0) {
		pr_err("%s-%d: QP transition to RTS failed\n", ch->sess_name,
		       ch->qp->qp_num);
		srpt_close_ch(ch);
		return;
	}

	/*
	 * Note: calling srpt_close_ch() if the transition to the LIVE state
	 * fails is not necessary since a failed transition means that
	 * srpt_close_ch() has already been invoked from another thread.
	 */
	if (!srpt_set_ch_state(ch, CH_LIVE)) {
		pr_err("%s-%d: channel transition to LIVE state failed\n",
		       ch->sess_name, ch->qp->qp_num);
		return;
	}

	/* Trigger wait list processing. */
	ret = srpt_zerolength_write(ch);
	WARN_ONCE(ret < 0, "%d\n", ret);
}

/**
 * srpt_cm_handler - IB connection manager callback function
 * @cm_id: IB/CM connection identifier.
 * @event: IB/CM event.
 *
 * A non-zero return value will cause the caller to destroy the CM ID.
 *
 * Note: srpt_cm_handler() must only return a non-zero value when transferring
 * ownership of the cm_id to a channel by srpt_cm_req_recv() failed. Returning
 * a non-zero value in any other case will trigger a race with the
 * ib_destroy_cm_id() call in srpt_release_channel().
 */
static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srpt_rdma_ch *ch = cm_id->context;
	int ret;

	ret = 0;
	switch (event->event) {
	case IB_CM_REQ_RECEIVED:
		ret = srpt_ib_cm_req_recv(cm_id, &event->param.req_rcvd,
					  event->private_data);
		break;
	case IB_CM_REJ_RECEIVED:
		srpt_cm_rej_recv(ch, event->param.rej_rcvd.reason,
				 event->private_data,
				 IB_CM_REJ_PRIVATE_DATA_SIZE);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		srpt_cm_rtu_recv(ch);
		break;
	case IB_CM_DREQ_RECEIVED:
		srpt_disconnect_ch(ch);
		break;
	case IB_CM_DREP_RECEIVED:
		pr_info("Received CM DREP message for ch %s-%d.\n",
			ch->sess_name, ch->qp->qp_num);
		srpt_close_ch(ch);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		pr_info("Received CM TimeWait exit for ch %s-%d.\n",
			ch->sess_name, ch->qp->qp_num);
		srpt_close_ch(ch);
		break;
	case IB_CM_REP_ERROR:
		pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name,
			ch->qp->qp_num);
		break;
	case IB_CM_DREQ_ERROR:
		pr_info("Received CM DREQ ERROR event.\n");
		break;
	case IB_CM_MRA_RECEIVED:
		pr_info("Received CM MRA event\n");
		break;
	default:
		pr_err("received unrecognized CM event %d\n", event->event);
		break;
	}

	return ret;
}

static int srpt_rdma_cm_handler(struct rdma_cm_id *cm_id,
				struct rdma_cm_event *event)
{
	struct srpt_rdma_ch *ch = cm_id->context;
	int ret = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = srpt_rdma_cm_req_recv(cm_id, event);
		break;
	case RDMA_CM_EVENT_REJECTED:
		srpt_cm_rej_recv(ch, event->status,
				 event->param.conn.private_data,
				 event->param.conn.private_data_len);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		srpt_cm_rtu_recv(ch);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		if (ch->state < CH_DISCONNECTING)
			srpt_disconnect_ch(ch);
		else
			srpt_close_ch(ch);
		break;
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		srpt_close_ch(ch);
		break;
	case RDMA_CM_EVENT_UNREACHABLE:
		pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name,
			ch->qp->qp_num);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_ADDR_CHANGE:
		break;
	default:
		pr_err("received unrecognized RDMA CM event %d\n",
		       event->event);
		break;
	}

	return ret;
}

static int srpt_write_pending_status(struct se_cmd *se_cmd)
{
	struct srpt_send_ioctx *ioctx;

	ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
	return ioctx->state == SRPT_STATE_NEED_DATA;
}

/*
 * srpt_write_pending - Start data transfer from initiator to target (write).
 */
static int srpt_write_pending(struct se_cmd *se_cmd)
{
	struct srpt_send_ioctx *ioctx =
		container_of(se_cmd, struct srpt_send_ioctx, cmd);
	struct srpt_rdma_ch *ch = ioctx->ch;
	struct ib_send_wr *first_wr = NULL, *bad_wr;
	struct ib_cqe *cqe = &ioctx->rdma_cqe;
	enum srpt_command_state new_state;
	int ret, i;

	new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
	WARN_ON(new_state == SRPT_STATE_DONE);

	if (atomic_sub_return(ioctx->n_rdma, &ch->sq_wr_avail) < 0) {
		pr_warn("%s: IB send queue full (needed %d)\n",
				__func__, ioctx->n_rdma);
		ret = -ENOMEM;
		goto out_undo;
	}

	cqe->done = srpt_rdma_read_done;
	for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
		struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];

		first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp, ch->sport->port,
				cqe, first_wr);
		cqe = NULL;
	}

	ret = ib_post_send(ch->qp, first_wr, &bad_wr);
	if (ret) {
		pr_err("%s: ib_post_send() returned %d for %d (avail: %d)\n",
			 __func__, ret, ioctx->n_rdma,
			 atomic_read(&ch->sq_wr_avail));
		goto out_undo;
	}

	return 0;
out_undo:
	atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
	return ret;
}
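
/*
 * Added commentary: srpt_write_pending() reserves ioctx->n_rdma send queue
 * slots up front; if the reservation would drive sq_wr_avail negative or
 * ib_post_send() fails, the out_undo label returns the reserved slots.
 */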

static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
{
	switch (tcm_mgmt_status) {
	case TMR_FUNCTION_COMPLETE:
		return SRP_TSK_MGMT_SUCCESS;
	case TMR_FUNCTION_REJECTED:
		return SRP_TSK_MGMT_FUNC_NOT_SUPP;
	}
	return SRP_TSK_MGMT_FAILED;
}

/**
 * srpt_queue_response - transmit the response to a SCSI command
 * @cmd: SCSI target command.
 *
 * Callback function called by the TCM core. Must not block since it can be
 * invoked on the context of the IB completion handler.
 */
static void srpt_queue_response(struct se_cmd *cmd)
{
	struct srpt_send_ioctx *ioctx =
		container_of(cmd, struct srpt_send_ioctx, cmd);
	struct srpt_rdma_ch *ch = ioctx->ch;
	struct srpt_device *sdev = ch->sport->sdev;
	struct ib_send_wr send_wr, *first_wr = &send_wr, *bad_wr;
	struct ib_sge sge;
	enum srpt_command_state state;
	int resp_len, ret, i;
	u8 srp_tm_status;

	BUG_ON(!ch);

	state = ioctx->state;
	switch (state) {
	case SRPT_STATE_NEW:
	case SRPT_STATE_DATA_IN:
		ioctx->state = SRPT_STATE_CMD_RSP_SENT;
		break;
	case SRPT_STATE_MGMT:
		ioctx->state = SRPT_STATE_MGMT_RSP_SENT;
		break;
	default:
		WARN(true, "ch %p; cmd %d: unexpected command state %d\n",
			ch, ioctx->ioctx.index, ioctx->state);
		break;
	}

	if (unlikely(WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT)))
		return;

	/* For read commands, transfer the data to the initiator. */
	if (ioctx->cmd.data_direction == DMA_FROM_DEVICE &&
	    ioctx->cmd.data_length &&
	    !ioctx->queue_status_only) {
		for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
			struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];

			first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp,
					ch->sport->port, NULL, first_wr);
		}
	}

	if (state != SRPT_STATE_MGMT)
		resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->cmd.tag,
					      cmd->scsi_status);
	else {
		srp_tm_status
			= tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response);
		resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
						 ioctx->cmd.tag);
	}

	atomic_inc(&ch->req_lim);

	if (unlikely(atomic_sub_return(1 + ioctx->n_rdma,
			&ch->sq_wr_avail) < 0)) {
		pr_warn("%s: IB send queue full (needed %d)\n",
				__func__, ioctx->n_rdma);
		ret = -ENOMEM;
		goto out;
	}

	ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, resp_len,
				      DMA_TO_DEVICE);

	sge.addr = ioctx->ioctx.dma;
	sge.length = resp_len;
	sge.lkey = sdev->lkey;

	ioctx->ioctx.cqe.done = srpt_send_done;
	send_wr.next = NULL;
	send_wr.wr_cqe = &ioctx->ioctx.cqe;
	send_wr.sg_list = &sge;
	send_wr.num_sge = 1;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(ch->qp, first_wr, &bad_wr);
	if (ret < 0) {
		pr_err("%s: sending cmd response failed for tag %llu (%d)\n",
			__func__, ioctx->cmd.tag, ret);
		goto out;
	}

	return;

out:
	atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
	atomic_dec(&ch->req_lim);
	srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
	target_put_sess_cmd(&ioctx->cmd);
}

static int srpt_queue_data_in(struct se_cmd *cmd)
{
	srpt_queue_response(cmd);
	return 0;
}

static void srpt_queue_tm_rsp(struct se_cmd *cmd)
{
	srpt_queue_response(cmd);
}

static void srpt_aborted_task(struct se_cmd *cmd)
{
}

static int srpt_queue_status(struct se_cmd *cmd)
{
	struct srpt_send_ioctx *ioctx;

	ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
	BUG_ON(ioctx->sense_data != cmd->sense_buffer);
	if (cmd->se_cmd_flags &
	    (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE))
		WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION);
	ioctx->queue_status_only = true;
	srpt_queue_response(cmd);
	return 0;
}

static void srpt_refresh_port_work(struct work_struct *work)
{
	struct srpt_port *sport = container_of(work, struct srpt_port, work);

	srpt_refresh_port(sport);
}

static bool srpt_ch_list_empty(struct srpt_port *sport)
{
	struct srpt_nexus *nexus;
	bool res = true;

	rcu_read_lock();
	list_for_each_entry(nexus, &sport->nexus_list, entry)
		if (!list_empty(&nexus->ch_list))
			res = false;
	rcu_read_unlock();

	return res;
}

/**
 * srpt_release_sport - disable login and wait for associated channels
 * @sport: SRPT HCA port.
 */
static int srpt_release_sport(struct srpt_port *sport)
{
	struct srpt_nexus *nexus, *next_n;
	struct srpt_rdma_ch *ch;

	WARN_ON_ONCE(irqs_disabled());

	mutex_lock(&sport->mutex);
	srpt_set_enabled(sport, false);
	mutex_unlock(&sport->mutex);

	while (wait_event_timeout(sport->ch_releaseQ,
				  srpt_ch_list_empty(sport), 5 * HZ) <= 0) {
		pr_info("%s_%d: waiting for session unregistration ...\n",
			sport->sdev->device->name, sport->port);
		rcu_read_lock();
		list_for_each_entry(nexus, &sport->nexus_list, entry) {
			list_for_each_entry(ch, &nexus->ch_list, list) {
				pr_info("%s-%d: state %s\n",
					ch->sess_name, ch->qp->qp_num,
					get_ch_state_name(ch->state));
			}
		}
		rcu_read_unlock();
	}

	mutex_lock(&sport->mutex);
	list_for_each_entry_safe(nexus, next_n, &sport->nexus_list, entry) {
		list_del(&nexus->entry);
		kfree_rcu(nexus, rcu);
	}
	mutex_unlock(&sport->mutex);

	return 0;
}

static struct se_wwn *__srpt_lookup_wwn(const char *name)
{
	struct ib_device *dev;
	struct srpt_device *sdev;
	struct srpt_port *sport;
	int i;

	list_for_each_entry(sdev, &srpt_dev_list, list) {
		dev = sdev->device;
		if (!dev)
			continue;

		for (i = 0; i < dev->phys_port_cnt; i++) {
			sport = &sdev->port[i];

			if (strcmp(sport->port_guid, name) == 0)
				return &sport->port_guid_wwn;
			if (strcmp(sport->port_gid, name) == 0)
				return &sport->port_gid_wwn;
		}
	}

	return NULL;
}

static struct se_wwn *srpt_lookup_wwn(const char *name)
{
	struct se_wwn *wwn;

	spin_lock(&srpt_dev_lock);
	wwn = __srpt_lookup_wwn(name);
	spin_unlock(&srpt_dev_lock);

	return wwn;
}

static void srpt_free_srq(struct srpt_device *sdev)
{
	if (!sdev->srq)
		return;

	ib_destroy_srq(sdev->srq);
	srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
			     sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);
	sdev->srq = NULL;
}

static int srpt_alloc_srq(struct srpt_device *sdev)
{
	struct ib_srq_init_attr srq_attr = {
		.event_handler = srpt_srq_event,
		.srq_context = (void *)sdev,
		.attr.max_wr = sdev->srq_size,
		.attr.max_sge = 1,
		.srq_type = IB_SRQT_BASIC,
	};
	struct ib_device *device = sdev->device;
	struct ib_srq *srq;
	int i;

	WARN_ON_ONCE(sdev->srq);
	srq = ib_create_srq(sdev->pd, &srq_attr);
	if (IS_ERR(srq)) {
		pr_debug("ib_create_srq() failed: %ld\n", PTR_ERR(srq));
		return PTR_ERR(srq);
	}

	pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size,
		 sdev->device->attrs.max_srq_wr, device->name);

	sdev->ioctx_ring = (struct srpt_recv_ioctx **)
		srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
				      sizeof(*sdev->ioctx_ring[0]),
				      srp_max_req_size, DMA_FROM_DEVICE);
	if (!sdev->ioctx_ring) {
		ib_destroy_srq(srq);
		return -ENOMEM;
	}

	sdev->use_srq = true;
	sdev->srq = srq;

	for (i = 0; i < sdev->srq_size; ++i) {
		INIT_LIST_HEAD(&sdev->ioctx_ring[i]->wait_list);
		srpt_post_recv(sdev, NULL, sdev->ioctx_ring[i]);
	}

	return 0;
}

static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
{
	struct ib_device *device = sdev->device;
	int ret = 0;

	if (!use_srq) {
		srpt_free_srq(sdev);
		sdev->use_srq = false;
	} else if (use_srq && !sdev->srq) {
		ret = srpt_alloc_srq(sdev);
	}
	pr_debug("%s(%s): use_srq = %d; ret = %d\n", __func__, device->name,
		 sdev->use_srq, ret);
	return ret;
}
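
/*
 * Added commentary: srpt_use_srq() is idempotent - disabling frees the SRQ
 * only if one exists, and enabling allocates one only while sdev->srq is
 * still NULL; sdev->use_srq is flipped to true only after srpt_alloc_srq()
 * succeeds.
 */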

/**
 * srpt_add_one - InfiniBand device addition callback function
 * @device: Describes a HCA.
 */
static void srpt_add_one(struct ib_device *device)
{
	struct srpt_device *sdev;
	struct srpt_port *sport;
	int i, ret;

	pr_debug("device = %p\n", device);

	sdev = kzalloc(struct_size(sdev, port, device->phys_port_cnt),
		       GFP_KERNEL);
	if (!sdev)
		goto err;

	sdev->device = device;
	mutex_init(&sdev->sdev_mutex);

	sdev->pd = ib_alloc_pd(device, 0);
	if (IS_ERR(sdev->pd))
		goto free_dev;

	sdev->lkey = sdev->pd->local_dma_lkey;

	sdev->srq_size = min(srpt_srq_size, sdev->device->attrs.max_srq_wr);

	srpt_use_srq(sdev, sdev->port[0].port_attrib.use_srq);

	if (!srpt_service_guid)
		srpt_service_guid = be64_to_cpu(device->node_guid);

	if (rdma_port_get_link_layer(device, 1) == IB_LINK_LAYER_INFINIBAND)
		sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
	if (IS_ERR(sdev->cm_id)) {
		pr_info("ib_create_cm_id() failed: %ld\n",
			PTR_ERR(sdev->cm_id));
		sdev->cm_id = NULL;
		if (!rdma_cm_id)
			goto err_ring;
	}

	/* print out target login information */
	pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,pkey=ffff,service_id=%016llx\n",
		 srpt_service_guid, srpt_service_guid, srpt_service_guid);

	/*
	 * We do not have a consistent service_id (i.e. also id_ext of
	 * target_id) to identify this target. We currently use the guid of
	 * the first HCA in the system as service_id; therefore, the
	 * target_id will change if this HCA goes bad and is replaced by a
	 * different HCA.
	 */
	ret = sdev->cm_id ?
		ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0) :
		0;
	if (ret < 0) {
		pr_err("ib_cm_listen() failed: %d (cm_id state = %d)\n", ret,
		       sdev->cm_id->state);
		goto err_cm;
	}

	INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
			      srpt_event_handler);
	ib_register_event_handler(&sdev->event_handler);

	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
		sport = &sdev->port[i - 1];
		INIT_LIST_HEAD(&sport->nexus_list);
		init_waitqueue_head(&sport->ch_releaseQ);
		mutex_init(&sport->mutex);
		sport->sdev = sdev;
		sport->port = i;
		sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
		sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
		sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
		sport->port_attrib.use_srq = false;
		INIT_WORK(&sport->work, srpt_refresh_port_work);

		if (srpt_refresh_port(sport)) {
			pr_err("MAD registration failed for %s-%d.\n",
			       sdev->device->name, i);
			goto err_event;
		}
	}

	spin_lock(&srpt_dev_lock);
	list_add_tail(&sdev->list, &srpt_dev_list);
	spin_unlock(&srpt_dev_lock);

out:
	ib_set_client_data(device, &srpt_client, sdev);
	pr_debug("added %s.\n", device->name);
	return;

err_event:
	ib_unregister_event_handler(&sdev->event_handler);
err_cm:
	if (sdev->cm_id)
		ib_destroy_cm_id(sdev->cm_id);
err_ring:
	srpt_free_srq(sdev);
	ib_dealloc_pd(sdev->pd);
free_dev:
	kfree(sdev);
err:
	sdev = NULL;
	pr_info("%s(%s) failed.\n", __func__, device->name);
	goto out;
}

/**
 * srpt_remove_one - InfiniBand device removal callback function
 * @device: Describes a HCA.
 * @client_data: The value passed as the third argument to ib_set_client_data().
 */
static void srpt_remove_one(struct ib_device *device, void *client_data)
{
	struct srpt_device *sdev = client_data;
	int i;

	if (!sdev) {
		pr_info("%s(%s): nothing to do.\n", __func__, device->name);
		return;
	}

	srpt_unregister_mad_agent(sdev);

	ib_unregister_event_handler(&sdev->event_handler);

	/* Cancel any work queued by the just unregistered IB event handler. */
	for (i = 0; i < sdev->device->phys_port_cnt; i++)
		cancel_work_sync(&sdev->port[i].work);

	if (sdev->cm_id)
		ib_destroy_cm_id(sdev->cm_id);

	ib_set_client_data(device, &srpt_client, NULL);

	/*
	 * Unregistering a target must happen after destroying sdev->cm_id
	 * such that no new SRP_LOGIN_REQ information units can arrive while
	 * destroying the target.
	 */
	spin_lock(&srpt_dev_lock);
	list_del(&sdev->list);
	spin_unlock(&srpt_dev_lock);

	for (i = 0; i < sdev->device->phys_port_cnt; i++)
		srpt_release_sport(&sdev->port[i]);

	srpt_free_srq(sdev);

	ib_dealloc_pd(sdev->pd);

	kfree(sdev);
}

static struct ib_client srpt_client = {
	.name = DRV_NAME,
	.add = srpt_add_one,
	.remove = srpt_remove_one
};

static int srpt_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int srpt_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *srpt_get_fabric_name(void)
{
	return "srpt";
}

static struct srpt_port *srpt_tpg_to_sport(struct se_portal_group *tpg)
{
	return tpg->se_tpg_wwn->priv;
}

static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
{
	struct srpt_port *sport = srpt_tpg_to_sport(tpg);

	WARN_ON_ONCE(tpg != &sport->port_guid_tpg &&
		     tpg != &sport->port_gid_tpg);
	return tpg == &sport->port_guid_tpg ? sport->port_guid :
		sport->port_gid;
}

static u16 srpt_get_tag(struct se_portal_group *tpg)
{
	return 1;
}

static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void srpt_release_cmd(struct se_cmd *se_cmd)
{
	struct srpt_send_ioctx *ioctx = container_of(se_cmd,
				struct srpt_send_ioctx, cmd);
	struct srpt_rdma_ch *ch = ioctx->ch;
	unsigned long flags;

	WARN_ON_ONCE(ioctx->state != SRPT_STATE_DONE &&
		     !(ioctx->cmd.transport_state & CMD_T_ABORTED));

	if (ioctx->n_rw_ctx) {
		srpt_free_rw_ctxs(ch, ioctx);
		ioctx->n_rw_ctx = 0;
	}

	spin_lock_irqsave(&ch->spinlock, flags);
	list_add(&ioctx->free_list, &ch->free_list);
	spin_unlock_irqrestore(&ch->spinlock, flags);
}

/**
 * srpt_close_session - forcibly close a session
 * @se_sess: SCSI target session.
 *
 * Callback function invoked by the TCM core to clean up sessions associated
 * with a node ACL when the user invokes
 * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
 */
static void srpt_close_session(struct se_session *se_sess)
{
	struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;

	srpt_disconnect_ch_sync(ch);
}

/**
 * srpt_sess_get_index - return the value of scsiAttIntrPortIndex (SCSI-MIB)
 * @se_sess: SCSI target session.
 *
 * A quote from RFC 4455 (SCSI-MIB) about this MIB object:
 * This object represents an arbitrary integer used to uniquely identify a
 * particular attached remote initiator port to a particular SCSI target port
 * within a particular SCSI target device within a particular SCSI instance.
 */
static u32 srpt_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static void srpt_set_default_node_attrs(struct se_node_acl *nacl)
{
}

/* Note: only used from inside debug printk's by the TCM core. */
static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
{
	struct srpt_send_ioctx *ioctx;

	ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
	return ioctx->state;
}

static int srpt_parse_guid(u64 *guid, const char *name)
{
	u16 w[4];
	int ret = -EINVAL;

	if (sscanf(name, "%hx:%hx:%hx:%hx", &w[0], &w[1], &w[2], &w[3]) != 4)
		goto out;
	*guid = get_unaligned_be64(w);
	ret = 0;
out:
	return ret;
}
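
/*
 * Illustrative example (added commentary): srpt_parse_guid() accepts GUID
 * strings such as "5054:00ff:fe4a:0b7b", i.e. four colon-separated 16-bit
 * hex words, and packs them into a single 64-bit value.
 */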

/**
 * srpt_parse_i_port_id - parse an initiator port ID
 * @name: ASCII representation of a 128-bit initiator port ID.
 * @i_port_id: Binary 128-bit port ID.
 */
static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
{
	const char *p;
	unsigned len, count, leading_zero_bytes;
	int ret;

	p = name;
	if (strncasecmp(p, "0x", 2) == 0)
		p += 2;
	ret = -EINVAL;
	len = strlen(p);
	if (len % 2)
		goto out;
	count = min(len / 2, 16U);
	leading_zero_bytes = 16 - count;
	memset(i_port_id, 0, leading_zero_bytes);
	ret = hex2bin(i_port_id + leading_zero_bytes, p, count);

out:
	return ret;
}
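
/*
 * Illustrative example (added commentary): parsing "0x505400fffe4a0b7b"
 * strips the "0x" prefix, converts the remaining 16 hex digits into 8 bytes
 * via hex2bin() and stores them in i_port_id[8..15], with i_port_id[0..7]
 * zero-filled.
 */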

/*
 * configfs callback function invoked for mkdir
 * /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
 *
 * i_port_id must be an initiator port GUID, GID or IP address. See also the
 * target_alloc_session() calls in this driver. Examples of valid initiator
 * port IDs:
 * 0x0000000000000000505400fffe4a0b7b
 * 0000000000000000505400fffe4a0b7b
 * 5054:00ff:fe4a:0b7b
 * 192.168.122.76
 */
static int srpt_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
	struct sockaddr_storage sa;
	u64 guid;
	u8 i_port_id[16];
	int ret;

	ret = srpt_parse_guid(&guid, name);
	if (ret < 0)
		ret = srpt_parse_i_port_id(i_port_id, name);
	if (ret < 0)
		ret = inet_pton_with_scope(&init_net, AF_UNSPEC, name, NULL,
					   &sa);
	if (ret < 0)
		pr_err("invalid initiator port ID %s\n", name);
	return ret;
}

static ssize_t srpt_tpg_attrib_srp_max_rdma_size_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);

	return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
}

static ssize_t srpt_tpg_attrib_srp_max_rdma_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
	unsigned long val;
	int ret;

	ret = kstrtoul(page, 0, &val);
	if (ret < 0) {
		pr_err("kstrtoul() failed with ret: %d\n", ret);
		return -EINVAL;
	}
	if (val > MAX_SRPT_RDMA_SIZE) {
		pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val,
			MAX_SRPT_RDMA_SIZE);
		return -EINVAL;
	}
	if (val < DEFAULT_MAX_RDMA_SIZE) {
		pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n",
			val, DEFAULT_MAX_RDMA_SIZE);
		return -EINVAL;
	}
	sport->port_attrib.srp_max_rdma_size = val;

	return count;
}

static ssize_t srpt_tpg_attrib_srp_max_rsp_size_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);

	return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
}

static ssize_t srpt_tpg_attrib_srp_max_rsp_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
	unsigned long val;
	int ret;

	ret = kstrtoul(page, 0, &val);
	if (ret < 0) {
		pr_err("kstrtoul() failed with ret: %d\n", ret);
		return -EINVAL;
	}
	if (val > MAX_SRPT_RSP_SIZE) {
		pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val,
			MAX_SRPT_RSP_SIZE);
		return -EINVAL;
	}
	if (val < MIN_MAX_RSP_SIZE) {
		pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val,
			MIN_MAX_RSP_SIZE);
		return -EINVAL;
	}
	sport->port_attrib.srp_max_rsp_size = val;

	return count;
}

static ssize_t srpt_tpg_attrib_srp_sq_size_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);

	return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size);
}

static ssize_t srpt_tpg_attrib_srp_sq_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
	unsigned long val;
	int ret;

	ret = kstrtoul(page, 0, &val);
	if (ret < 0) {
		pr_err("kstrtoul() failed with ret: %d\n", ret);
		return -EINVAL;
	}
	if (val > MAX_SRPT_SRQ_SIZE) {
		pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val,
			MAX_SRPT_SRQ_SIZE);
		return -EINVAL;
	}
	if (val < MIN_SRPT_SRQ_SIZE) {
		pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val,
			MIN_SRPT_SRQ_SIZE);
		return -EINVAL;
	}
	sport->port_attrib.srp_sq_size = val;

	return count;
}

static ssize_t srpt_tpg_attrib_use_srq_show(struct config_item *item,
					    char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);

	return sprintf(page, "%d\n", sport->port_attrib.use_srq);
}

static ssize_t srpt_tpg_attrib_use_srq_store(struct config_item *item,
					     const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
	struct srpt_device *sdev = sport->sdev;
	unsigned long val;
	bool enabled;
	int ret;

	ret = kstrtoul(page, 0, &val);
	if (ret < 0)
		return ret;
	if (val != !!val)
		return -EINVAL;

	ret = mutex_lock_interruptible(&sdev->sdev_mutex);
	if (ret < 0)
		return ret;
	ret = mutex_lock_interruptible(&sport->mutex);
	if (ret < 0)
		goto unlock_sdev;
	enabled = sport->enabled;
	/* Log out all initiator systems before changing 'use_srq'. */
	srpt_set_enabled(sport, false);
	sport->port_attrib.use_srq = val;
	srpt_use_srq(sdev, sport->port_attrib.use_srq);
	srpt_set_enabled(sport, enabled);
	ret = count;
	mutex_unlock(&sport->mutex);
unlock_sdev:
	mutex_unlock(&sdev->sdev_mutex);

	return ret;
}

CONFIGFS_ATTR(srpt_tpg_attrib_,  srp_max_rdma_size);
CONFIGFS_ATTR(srpt_tpg_attrib_,  srp_max_rsp_size);
CONFIGFS_ATTR(srpt_tpg_attrib_,  srp_sq_size);
CONFIGFS_ATTR(srpt_tpg_attrib_,  use_srq);

static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
	&srpt_tpg_attrib_attr_srp_max_rdma_size,
	&srpt_tpg_attrib_attr_srp_max_rsp_size,
	&srpt_tpg_attrib_attr_srp_sq_size,
	&srpt_tpg_attrib_attr_use_srq,
	NULL,
};

static struct rdma_cm_id *srpt_create_rdma_id(struct sockaddr *listen_addr)
{
	struct rdma_cm_id *rdma_cm_id;
	int ret;

	rdma_cm_id = rdma_create_id(&init_net, srpt_rdma_cm_handler,
				    NULL, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(rdma_cm_id)) {
		pr_err("RDMA/CM ID creation failed: %ld\n",
		       PTR_ERR(rdma_cm_id));
		goto out;
	}

	ret = rdma_bind_addr(rdma_cm_id, listen_addr);
	if (ret) {
		char addr_str[64];

		snprintf(addr_str, sizeof(addr_str), "%pISp", listen_addr);
		pr_err("Binding RDMA/CM ID to address %s failed: %d\n",
		       addr_str, ret);
		rdma_destroy_id(rdma_cm_id);
		rdma_cm_id = ERR_PTR(ret);
		goto out;
	}

	ret = rdma_listen(rdma_cm_id, 128);
	if (ret) {
		pr_err("rdma_listen() failed: %d\n", ret);
		rdma_destroy_id(rdma_cm_id);
		rdma_cm_id = ERR_PTR(ret);
	}

out:
	return rdma_cm_id;
}

static ssize_t srpt_rdma_cm_port_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", rdma_cm_port);
}

static ssize_t srpt_rdma_cm_port_store(struct config_item *item,
				       const char *page, size_t count)
{
	struct sockaddr_in  addr4 = { .sin_family  = AF_INET  };
	struct sockaddr_in6 addr6 = { .sin6_family = AF_INET6 };
	struct rdma_cm_id *new_id = NULL;
	u16 val;
	int ret;

	ret = kstrtou16(page, 0, &val);
	if (ret < 0)
		return ret;
	ret = count;
	if (rdma_cm_port == val)
		goto out;

	if (val) {
		addr6.sin6_port = cpu_to_be16(val);
		new_id = srpt_create_rdma_id((struct sockaddr *)&addr6);
		if (IS_ERR(new_id)) {
			addr4.sin_port = cpu_to_be16(val);
			new_id = srpt_create_rdma_id((struct sockaddr *)&addr4);
			if (IS_ERR(new_id)) {
				ret = PTR_ERR(new_id);
				goto out;
			}
		}
	}

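	/*
	 * Publish the new port number and listener atomically. After the
	 * swap the previous listener, if any, sits in new_id and is
	 * destroyed below, outside the mutex.
	 */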
	mutex_lock(&rdma_cm_mutex);
	rdma_cm_port = val;
	swap(rdma_cm_id, new_id);
	mutex_unlock(&rdma_cm_mutex);

	if (new_id)
		rdma_destroy_id(new_id);
out:
	return ret;
}
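
/*
 * A hedged usage sketch (assuming the discovery_auth directory that the
 * generic target configfs code creates for fabric discovery attributes;
 * the port number is an arbitrary example):
 *
 *   echo 4420 > /sys/kernel/config/target/srpt/discovery_auth/rdma_cm_port
 */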

CONFIGFS_ATTR(srpt_, rdma_cm_port);

static struct configfs_attribute *srpt_da_attrs[] = {
	&srpt_attr_rdma_cm_port,
	NULL,
};

static ssize_t srpt_tpg_enable_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);

	return snprintf(page, PAGE_SIZE, "%d\n", sport->enabled ? 1 : 0);
}

static ssize_t srpt_tpg_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract srpt_tpg_store_enable\n");
		return -EINVAL;
	}

	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
		return -EINVAL;
	}

	mutex_lock(&sport->mutex);
	srpt_set_enabled(sport, tmp);
	mutex_unlock(&sport->mutex);

	return count;
}
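
/*
 * A hedged usage sketch ($GUID and $TPG are placeholders for an existing
 * port WWN and target portal group): enabling a TPG so that it starts
 * accepting SRP logins:
 *
 *   echo 1 > /sys/kernel/config/target/srpt/$GUID/$TPG/enable
 */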

CONFIGFS_ATTR(srpt_tpg_, enable);

static struct configfs_attribute *srpt_tpg_attrs[] = {
	&srpt_tpg_attr_enable,
	NULL,
};

/**
 * srpt_make_tpg - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port/$tpg
 * @wwn: Corresponds to $driver/$port.
 * @group: Not used.
 * @name: $tpg.
 */
static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
					     struct config_group *group,
					     const char *name)
{
	struct srpt_port *sport = wwn->priv;
	struct se_portal_group *tpg;
	int res;

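	/*
	 * Each port is exposed under two WWNs, one derived from the port
	 * GUID and one from the port GID; pick the embedded TPG that
	 * matches the WWN being configured.
	 */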
	WARN_ON_ONCE(wwn != &sport->port_guid_wwn &&
		     wwn != &sport->port_gid_wwn);
	tpg = wwn == &sport->port_guid_wwn ? &sport->port_guid_tpg :
		&sport->port_gid_tpg;
	res = core_tpg_register(wwn, tpg, SCSI_PROTOCOL_SRP);
	if (res)
		return ERR_PTR(res);

	return tpg;
}

/**
 * srpt_drop_tpg - configfs callback invoked for rmdir /sys/kernel/config/target/$driver/$port/$tpg
 * @tpg: Target portal group to deregister.
 */
static void srpt_drop_tpg(struct se_portal_group *tpg)
{
	struct srpt_port *sport = srpt_tpg_to_sport(tpg);

	sport->enabled = false;
	core_tpg_deregister(tpg);
}

/**
 * srpt_make_tport - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port
 * @tf: Not used.
 * @group: Not used.
 * @name: $port.
 */
static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
				      struct config_group *group,
				      const char *name)
{
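	/* srpt_lookup_wwn() returns NULL when @name matches no port. */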
	return srpt_lookup_wwn(name) ? : ERR_PTR(-EINVAL);
}

/**
 * srpt_drop_tport - configfs callback invoked for rmdir /sys/kernel/config/target/$driver/$port
 * @wwn: $port.
 */
static void srpt_drop_tport(struct se_wwn *wwn)
{
}

static ssize_t srpt_wwn_version_show(struct config_item *item, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
}

CONFIGFS_ATTR_RO(srpt_wwn_, version);

static struct configfs_attribute *srpt_wwn_attrs[] = {
	&srpt_wwn_attr_version,
	NULL,
};

static const struct target_core_fabric_ops srpt_template = {
	.module				= THIS_MODULE,
	.name				= "srpt",
	.get_fabric_name		= srpt_get_fabric_name,
	.tpg_get_wwn			= srpt_get_fabric_wwn,
	.tpg_get_tag			= srpt_get_tag,
	.tpg_check_demo_mode		= srpt_check_false,
	.tpg_check_demo_mode_cache	= srpt_check_true,
	.tpg_check_demo_mode_write_protect = srpt_check_true,
	.tpg_check_prod_mode_write_protect = srpt_check_false,
	.tpg_get_inst_index		= srpt_tpg_get_inst_index,
	.release_cmd			= srpt_release_cmd,
	.check_stop_free		= srpt_check_stop_free,
	.close_session			= srpt_close_session,
	.sess_get_index			= srpt_sess_get_index,
	.sess_get_initiator_sid		= NULL,
	.write_pending			= srpt_write_pending,
	.write_pending_status		= srpt_write_pending_status,
	.set_default_node_attributes	= srpt_set_default_node_attrs,
	.get_cmd_state			= srpt_get_tcm_cmd_state,
	.queue_data_in			= srpt_queue_data_in,
	.queue_status			= srpt_queue_status,
	.queue_tm_rsp			= srpt_queue_tm_rsp,
	.aborted_task			= srpt_aborted_task,
	/*
	 * Setup function pointers for generic logic in
	 * target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= srpt_make_tport,
	.fabric_drop_wwn		= srpt_drop_tport,
	.fabric_make_tpg		= srpt_make_tpg,
	.fabric_drop_tpg		= srpt_drop_tpg,
	.fabric_init_nodeacl		= srpt_init_nodeacl,

	.tfc_discovery_attrs		= srpt_da_attrs,
	.tfc_wwn_attrs			= srpt_wwn_attrs,
	.tfc_tpg_base_attrs		= srpt_tpg_attrs,
	.tfc_tpg_attrib_attrs		= srpt_tpg_attrib_attrs,
};

/**
 * srpt_init_module - kernel module initialization
 *
 * Note: Since ib_register_client() registers callback functions, and since at
 * least one of these callback functions (srpt_add_one()) calls target core
 * functions, this driver must be registered with the target core before
 * ib_register_client() is called.
 */
static int __init srpt_init_module(void)
{
	int ret;

	ret = -EINVAL;
	if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
		pr_err("invalid value %d for kernel module parameter"
3722 3723 3724 3725 3726 3727 3728
		       " srp_max_req_size -- must be at least %d.\n",
		       srp_max_req_size, MIN_MAX_REQ_SIZE);
		goto out;
	}

	if (srpt_srq_size < MIN_SRPT_SRQ_SIZE ||
	    srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
		pr_err("invalid value %d for kernel module parameter"
3730 3731 3732 3733 3734
		       " srpt_srq_size -- must be in the range [%d..%d].\n",
		       srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
		goto out;
	}

	ret = target_register_template(&srpt_template);
	if (ret)
		goto out;

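	/*
	 * May call srpt_add_one() synchronously for HCAs that are already
	 * present, hence the registration order (see also the comment above
	 * this function).
	 */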
	ret = ib_register_client(&srpt_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto out_unregister_target;
	}

	return 0;

out_unregister_target:
	target_unregister_template(&srpt_template);
out:
	return ret;
}

static void __exit srpt_cleanup_module(void)
{
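	/* Stop listening for new RDMA/CM logins before unregistering. */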
	if (rdma_cm_id)
		rdma_destroy_id(rdma_cm_id);
	ib_unregister_client(&srpt_client);
	target_unregister_template(&srpt_template);
}

module_init(srpt_init_module);
module_exit(srpt_cleanup_module);