/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS		256

struct nvme_loop_iod {
27
	struct nvme_request	nvme_req;
28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54
	struct nvme_command	cmd;
	struct nvme_completion	rsp;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

/*
 * One loopback controller: pairs a host-side nvme_ctrl with a target-side
 * nvmet_ctrl on the same system.
 */
struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;	/* [0] = admin, [1..] = I/O */

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;		/* entry on nvme_loop_ctrl_list */
	struct blk_mq_tag_set	tag_set;	/* I/O queues */
	struct nvme_loop_iod	async_event_iod;	/* dedicated AEN request */
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
};

/* Map a generic nvme_ctrl back to its containing loop controller. */
static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

/* Per-queue state bits stored in nvme_loop_queue.flags. */
enum nvme_loop_queue_flags {
	NVME_LOOP_Q_LIVE	= 0,	/* set once fabrics connect succeeded */
};

59 60 61 62
struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
63
	unsigned long		flags;
64 65 66 67 68 69 70 71 72 73
};

static struct nvmet_port *nvmet_loop_port;

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

74
static const struct nvmet_fabrics_ops nvme_loop_ops;
75 76 77 78 79 80 81 82 83 84 85 86

/* Index of @queue within its controller's queue array (0 == admin queue). */
static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

/*
 * blk-mq ->complete handler: release per-request resources (command
 * payload, chained SG table) and finish the request in the nvme core.
 */
static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_cleanup_cmd(req);
	sg_free_table_chained(&iod->sg_table, true);
	nvme_complete_rq(req);
}

90 91 92
static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);
93

94 95 96
	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
97 98
}

99
static void nvme_loop_queue_response(struct nvmet_req *req)
100
{
101 102 103
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->rsp;
104 105 106 107 108 109 110

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
111
	if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
K
Keith Busch 已提交
112
			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
113
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
114
				&cqe->result);
115
	} else {
116 117 118 119 120 121 122 123 124
		struct request *rq;

		rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"tag 0x%x on queue %d not found\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}
125

126
		nvme_end_request(rq, cqe->status, cqe->result);
127 128 129 130 131 132 133 134
	}
}

static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

135
	nvmet_req_execute(&iod->req);
136 137 138 139 140 141 142 143
}

/*
 * blk-mq ->timeout handler: kick off controller reset for error recovery
 * and complete the timed-out request with an aborted/DNR status.
 */
static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

	/* queue error recovery */
	nvme_reset_ctrl(&iod->queue->ctrl->ctrl);

	/* fail with DNR on admin cmd timeout */
	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	return BLK_EH_HANDLED;
}

152
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
153 154 155 156 157 158
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
159
	blk_status_t ret;
160

161 162
	ret = nvmf_check_if_ready(&queue->ctrl->ctrl, req,
		test_bit(NVME_LOOP_Q_LIVE, &queue->flags), true);
163 164 165
	if (unlikely(ret))
		return ret;

166
	ret = nvme_setup_cmd(ns, req, &iod->cmd);
167
	if (ret)
168 169
		return ret;

170
	blk_mq_start_request(req);
171 172 173
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = nvmet_loop_port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
174
			&queue->nvme_sq, &nvme_loop_ops))
175
		return BLK_STS_OK;
176

177
	if (blk_rq_payload_bytes(req)) {
178
		iod->sg_table.sgl = iod->first_sgl;
179
		if (sg_alloc_table_chained(&iod->sg_table,
180
				blk_rq_nr_phys_segments(req),
181 182
				iod->sg_table.sgl))
			return BLK_STS_RESOURCE;
183 184 185

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
186
		iod->req.transfer_len = blk_rq_payload_bytes(req);
187 188 189
	}

	schedule_work(&iod->work);
190
	return BLK_STS_OK;
191 192
}

193
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
194 195 196 197 198 199 200
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
K
Keith Busch 已提交
201
	iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

/*
 * Wire up an iod: point the target request at the embedded command and
 * response buffers, bind it to @queue_idx and set up its work item.
 * Always returns 0 (matches blk-mq init_request conventions).
 */
static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.rsp = &iod->rsp;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

223 224 225
static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
226
{
227
	struct nvme_loop_ctrl *ctrl = set->driver_data;
228

229 230
	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
231 232 233 234 235 236 237 238
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

239
	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256

	hctx->driver_data = queue;
	return 0;
}

/* blk-mq ->init_hctx for the admin queue: always binds to queue 0. */
static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

257
static const struct blk_mq_ops nvme_loop_mq_ops = {
258 259 260 261 262 263 264
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
	.timeout	= nvme_loop_timeout,
};

265
static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
266 267
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
268
	.init_request	= nvme_loop_init_request,
269 270 271 272 273 274
	.init_hctx	= nvme_loop_init_admin_hctx,
	.timeout	= nvme_loop_timeout,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
275
	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
276
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

/*
 * nvme_ctrl_ops ->free_ctrl: final release of a loop controller.
 * An empty list head means nvme_loop_create_ctrl() failed before the
 * controller was published, so only the allocation itself is freed.
 */
static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	/* tagset is only set once I/O queues were created */
	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

302 303 304 305
static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

306 307
	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
308
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
309
	}
310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330
}

/*
 * Negotiate the I/O queue count with the (target) controller and
 * initialize the target-side SQ of each I/O queue.  queue_count is
 * bumped per queue so the error path only tears down what succeeded.
 */
static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->ctrl.queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

341 342 343 344
static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

345
	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
346 347 348
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			return ret;
349
		set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
350 351 352 353 354
	}

	return 0;
}

355 356 357 358 359 360
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
K
Keith Busch 已提交
361
	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
362 363 364 365 366 367 368
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
369
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
370 371 372 373 374

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
375
	ctrl->ctrl.queue_count = 1;
376 377 378 379

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;
380
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
381 382 383 384 385 386 387 388 389 390 391

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

392 393
	set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);

394
	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
395 396 397 398 399 400 401
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
402
		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);
403

404
	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
428
	if (ctrl->ctrl.queue_count > 1) {
429 430 431
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
432
		nvme_loop_destroy_io_queues(ctrl);
433 434 435 436 437
	}

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

438
	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
439 440
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
441
	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
442 443 444
	nvme_loop_destroy_admin_queue(ctrl);
}

/* nvme_ctrl_ops ->delete_ctrl: host-initiated controller deletion. */
static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{
	nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
}

/*
 * nvmet fabrics ->delete_ctrl: target-initiated deletion.  Find the loop
 * controller matching the target's cntlid and schedule its removal.
 */
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
464 465
	struct nvme_loop_ctrl *ctrl =
		container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
466
	bool changed;
467
	int ret;
468

469
	nvme_stop_ctrl(&ctrl->ctrl);
470 471
	nvme_loop_shutdown_ctrl(ctrl);

472 473 474 475 476 477
	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure should never happen */
		WARN_ON_ONCE(1);
		return;
	}

478 479 480 481
	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

482 483 484
	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;
485

486 487 488
	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;
489

490 491 492
	blk_mq_update_nr_hw_queues(&ctrl->tag_set,
			ctrl->ctrl.queue_count - 1);

493 494 495
	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

496
	nvme_start_ctrl(&ctrl->ctrl);
497 498 499

	return;

500 501 502
out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
503 504 505 506 507 508 509 510 511 512
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
513
	.flags			= NVME_F_FABRICS,
514 515 516 517 518
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
519
	.delete_ctrl		= nvme_loop_delete_ctrl_host,
520 521 522 523
};

static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
524
	int ret;
525

526 527
	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
528 529 530 531
		return ret;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
532
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
533 534 535 536 537 538
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
539
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
540 541 542 543 544 545 546 547 548 549 550 551 552
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

553 554 555
	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;
556 557 558 559 560 561 562 563

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
564
	nvme_loop_destroy_io_queues(ctrl);
565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580
	return ret;
}

static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

581
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);
582 583 584 585 586 587 588 589

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_put_ctrl;

	ret = -ENOMEM;

590
	ctrl->ctrl.sqsize = opts->queue_size - 1;
591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620
	ctrl->ctrl.kato = opts->kato;

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

621
	nvme_get_ctrl(&ctrl->ctrl);
622 623 624 625 626 627 628 629

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

630
	nvme_start_ctrl(&ctrl->ctrl);
631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

/* nvmet fabrics ->add_port: remember the single supported loop port. */
static int nvme_loop_add_port(struct nvmet_port *port)
{
	/*
	 * XXX: disallow adding more than one port so
	 * there are no connection rejections when a
	 * subsystem is assigned to a port for which
	 * loop doesn't have a pointer.
	 * This scenario would be possible if we allowed
	 * more than one port to be added and a subsystem
	 * was assigned to a port other than nvmet_loop_port.
	 */

	if (nvmet_loop_port)
		return -EPERM;

	nvmet_loop_port = port;
	return 0;
}

/* nvmet fabrics ->remove_port: forget the port if it is ours. */
static void nvme_loop_remove_port(struct nvmet_port *port)
{
	if (port == nvmet_loop_port)
		nvmet_loop_port = NULL;
}

672
static const struct nvmet_fabrics_ops nvme_loop_ops = {
673 674 675 676 677 678 679 680 681 682
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response = nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
683
	.module		= THIS_MODULE,
684 685 686 687 688 689 690 691 692 693
	.create_ctrl	= nvme_loop_create_ctrl,
};

/* Module init: register the target transport, then the host transport. */
static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

/*
 * Module exit: unregister both transports, schedule deletion of every
 * remaining controller, and wait for the deletions to finish.
 */
static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_workqueue(nvme_delete_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */