// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS		256

struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	cqe;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
	struct nvmet_port	*port;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

enum nvme_loop_queue_flags {
	NVME_LOOP_Q_LIVE	= 0,
};

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
	unsigned long		flags;
};

static LIST_HEAD(nvme_loop_ports);
static DEFINE_MUTEX(nvme_loop_ports_mutex);

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static const struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	sg_free_table_chained(&iod->sg_table, NVME_INLINE_SG_CNT);
	nvme_complete_rq(req);
}

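/*
 * Return the blk-mq tag set that owns this queue's requests; queue 0
 * is the admin queue, everything else is an I/O queue.
 */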
static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

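/*
 * Completion path: the nvmet core calls this once the target side has
 * finished a command, and we route the result back to the host-side
 * request that originated it.
 */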
static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->cqe;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvme_loop_queue_idx(queue),
				     cqe->command_id))) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;

		rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"tag 0x%x on queue %d not found\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		nvme_end_request(rq, cqe->status, cqe->result);
	}
}

static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	iod->req.execute(&iod->req);
}

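/*
 * blk-mq ->queue_rq handler: set up an nvmet_req that shares the host
 * request's scatterlist and hand it to the target core from the iod
 * work item.
 */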
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret)
		return ret;

	blk_mq_start_request(req);
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = queue->ctrl->port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops))
		return BLK_STS_OK;

	if (blk_rq_nr_phys_segments(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		if (sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl, NVME_INLINE_SG_CNT)) {
			nvme_cleanup_cmd(req);
			return BLK_STS_RESOURCE;
		}

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
		iod->req.transfer_len = blk_rq_payload_bytes(req);
	}

	schedule_work(&iod->work);
	return BLK_STS_OK;
}

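/*
 * Asynchronous Event Requests have no struct request, so submit them
 * to the target side using the controller's pre-allocated
 * async_event_iod.
 */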
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.cqe = &iod->cqe;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_loop_ctrl *ctrl = set->driver_data;

	nvme_req(req)->ctrl = &ctrl->ctrl;
	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	}
}

static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->ctrl.queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
		if (ret)
			return ret;
		set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
	}

	return 0;
}

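/*
 * Bring up the admin queue: allocate the admin tag set and queues,
 * issue the fabrics Connect, enable the controller and read the
 * identify data.
 */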
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->ctrl.queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.fabrics_q)) {
		error = PTR_ERR(ctrl->ctrl.fabrics_q);
		goto out_free_tagset;
	}

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_cleanup_fabrics_q;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);

	error = nvme_enable_ctrl(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_cleanup_fabrics_q:
	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

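/*
 * Tear down all I/O and admin queues, cancelling outstanding requests,
 * and shut the controller down if it is still live.
 */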
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
		nvme_loop_destroy_io_queues(ctrl);
	}

	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{
	nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

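/*
 * Controller reset: shut everything down and reconnect.  If the
 * reconnect fails, the controller is removed entirely.
 */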
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl =
		container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
	bool changed;
	int ret;

	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure should never happen */
		WARN_ON_ONCE(1);
		return;
	}

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	blk_mq_update_nr_hw_queues(&ctrl->tag_set,
			ctrl->ctrl.queue_count - 1);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	nvme_start_ctrl(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_delete_ctrl_host,
	.get_address		= nvmf_get_address,
};

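/*
 * Allocate the I/O tag set and the connect queue, then issue the
 * fabrics Connect on each I/O queue.
 */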
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

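/*
 * Pick the nvmet loop port a new controller should attach to, keyed by
 * the traddr connect option when one is given.
 */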
static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl)
{
	struct nvmet_port *p, *found = NULL;

	mutex_lock(&nvme_loop_ports_mutex);
	list_for_each_entry(p, &nvme_loop_ports, entry) {
		/* if no transport address is specified use the first port */
		if ((ctrl->opts->mask & NVMF_OPT_TRADDR) &&
		    strcmp(ctrl->opts->traddr, p->disc_addr.traddr))
			continue;
		found = p;
		break;
	}
	mutex_unlock(&nvme_loop_ports_mutex);
	return found;
}

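/*
 * Create a loop controller in response to a fabrics connect request:
 * allocate it, bring up the admin and I/O queues, then mark it live
 * and start it.
 */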
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_put_ctrl;

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;
	ctrl->port = nvme_loop_find_port(&ctrl->ctrl);

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	nvme_get_ctrl(&ctrl->ctrl);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	nvme_start_ctrl(&ctrl->ctrl);

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_add_tail(&port->entry, &nvme_loop_ports);
	mutex_unlock(&nvme_loop_ports_mutex);
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_del_init(&port->entry);
	mutex_unlock(&nvme_loop_ports_mutex);

	/*
	 * Ensure any ctrls that are in the process of being
	 * deleted are in fact deleted before we return
	 * and free the port. This is to prevent active
	 * ctrls from using a port after it's freed.
	 */
	flush_workqueue(nvme_delete_wq);
}

static const struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response = nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.module		= THIS_MODULE,
	.create_ctrl	= nvme_loop_create_ctrl,
	.allowed_opts	= NVMF_OPT_TRADDR,
};

static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_workqueue(nvme_delete_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */