/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS		256

/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_LOOP_NR_AEN_COMMANDS	1
#define NVME_LOOP_AQ_BLKMQ_DEPTH	\
	(NVME_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)

struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	rsp;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
};

static struct nvmet_port *nvmet_loop_port;

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

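/*
 * Host-side completion path: release the command payload and the chained
 * scatterlist before handing the request back to the block layer.
 */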
static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_cleanup_cmd(req);
	sg_free_table_chained(&iod->sg_table, true);
	nvme_complete_rq(req);
}

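/*
 * Queue 0 is the admin queue and has its own tag set; I/O queues are
 * shifted down by one because tag_set.tags[] covers only the I/O queues.
 */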
static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

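/*
 * ->queue_response: called by the target core once a command has been
 * executed; routes the CQE back to the matching host-side request.
 */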
static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->rsp;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
			cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;

		rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"tag 0x%x on queue %d not found\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		nvme_end_request(rq, cqe->status, cqe->result);
	}
}

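/*
 * Target-side execution runs from a work item so that ->queue_rq can
 * return to the block layer without executing the command inline.
 */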
static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	iod->req.execute(&iod->req);
}

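/*
 * A timed-out request kicks off a controller reset and is completed with
 * ABORT + DNR so that the core does not retry it.
 */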
static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

	/* queue error recovery */
	nvme_reset_ctrl(&iod->queue->ctrl->ctrl);

	/* fail with DNR on admin cmd timeout */
	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	return BLK_EH_HANDLED;
}

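/*
 * ->queue_rq: build the NVMe command from the block request, initialize
 * the target-side nvmet_req (mapping any data into a chained SG table),
 * and defer execution to nvme_loop_execute_work.
 */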
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret;

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret)
		return ret;

	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = nvmet_loop_port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops)) {
		nvme_cleanup_cmd(req);
		blk_mq_start_request(req);
		nvme_loop_queue_response(&iod->req);
		return BLK_STS_OK;
	}

	if (blk_rq_bytes(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		if (sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl))
			return BLK_STS_RESOURCE;

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
	}

	blk_mq_start_request(req);

	schedule_work(&iod->work);
	return BLK_STS_OK;
}

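/*
 * AEN commands bypass the block layer: they use the reserved command id
 * NVME_LOOP_AQ_BLKMQ_DEPTH and the dedicated iod embedded in the ctrl.
 */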
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_LOOP_AQ_BLKMQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.rsp = &iod->rsp;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

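/*
 * Per-request init: requests from the I/O tag set map to
 * queues[hctx_idx + 1]; admin requests always map to queues[0].
 */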
static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_loop_ctrl *ctrl = set->driver_data;

	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
	.timeout	= nvme_loop_timeout,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
	.timeout	= nvme_loop_timeout,
};

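/* Tear the admin queue down in the reverse order of its setup. */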
static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
}

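/*
 * Negotiate the number of I/O queues with the target, then initialize a
 * target-side submission queue for each of them.
 */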
static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->ctrl.queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			return ret;
	}

	return 0;
}

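/*
 * Admin queue bring-up: target SQ, host tag set and request queue,
 * fabrics Connect, then controller enable and identify.
 */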
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_LOOP_AQ_BLKMQ_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->ctrl.queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

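/*
 * Quiesce and cancel all outstanding commands on both the I/O and admin
 * queues before tearing them down; shared by the delete and reset paths.
 */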
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		nvme_loop_destroy_io_queues(ctrl);
	}

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{
	nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

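/*
 * Controller reset: stop and tear down the controller, then rerun the
 * same bring-up sequence used at creation time; on failure the
 * controller is removed entirely.
 */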
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl =
		container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
	bool changed;
	int ret;

	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	blk_mq_update_nr_hw_queues(&ctrl->tag_set,
			ctrl->ctrl.queue_count - 1);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	nvme_start_ctrl(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_delete_ctrl_host,
};

static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

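/*
 * "connect" entry point: allocate and initialize the controller, bring
 * up the admin queue and any I/O queues, then mark it LIVE and start it.
 */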
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_put_ctrl;

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	nvme_get_ctrl(&ctrl->ctrl);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	nvme_start_ctrl(&ctrl->ctrl);

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	/*
	 * XXX: disallow adding more than one port so
	 * there are no connection rejections when a
	 * subsystem is assigned to a port for which
	 * loop doesn't have a pointer.
	 * This scenario would be possible if we allowed
	 * more than one port to be added and a subsystem
	 * was assigned to a port other than nvmet_loop_port.
	 */

	if (nvmet_loop_port)
		return -EPERM;

	nvmet_loop_port = port;
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	if (port == nvmet_loop_port)
		nvmet_loop_port = NULL;
}

static struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response = nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.create_ctrl	= nvme_loop_create_ctrl,
};

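/*
 * Register both halves of the loopback transport: the target side with
 * the nvmet core and the host side with the fabrics core.
 */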
static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_workqueue(nvme_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */