// SPDX-License-Identifier: GPL-2.0
/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/pci-p2pdma.h>
#include <linux/scatterlist.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "nvmet.h"

struct workqueue_struct *buffered_io_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);

/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources protected by this semaphore is:
 *
 *  - subsystems list
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - nvmet_genctr
 *  - the nvmet_transports array
 *
 * When updating any of those lists/structures the write lock should be
 * obtained, while the read lock is taken when reading (populating the
 * discovery log page or checking a host-subsystem link) to allow
 * concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);

u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
u64 nvmet_ana_chgcnt;
DECLARE_RWSEM(nvmet_ana_sem);

inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
{
	u16 status;

	switch (errno) {
	case 0:
		status = NVME_SC_SUCCESS;
		break;
	case -ENOSPC:
		req->error_loc = offsetof(struct nvme_rw_command, length);
		status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
		break;
	case -EREMOTEIO:
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		break;
	case -EOPNOTSUPP:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		switch (req->cmd->common.opcode) {
		case nvme_cmd_dsm:
		case nvme_cmd_write_zeroes:
			status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
			break;
		default:
			status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		}
		break;
	case -ENODATA:
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		status = NVME_SC_ACCESS_DENIED;
		break;
	case -EIO:
		/* FALLTHRU */
	default:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
	}

	return status;
}

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len)
{
	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}

u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}

u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
{
	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}

static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
{
	struct nvmet_ns *ns;

	if (list_empty(&subsys->namespaces))
		return 0;

	ns = list_last_entry(&subsys->namespaces, struct nvmet_ns, dev_link);
	return ns->nsid;
}

static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}

static void nvmet_async_events_process(struct nvmet_ctrl *ctrl, u16 status)
{
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	while (1) {
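		/*
		 * Pair each pending async event with an outstanding AER
		 * command and stop once either list runs out.  ctrl->lock is
		 * dropped before completing the request below.
		 */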
		mutex_lock(&ctrl->lock);
		aen = list_first_entry_or_null(&ctrl->async_events,
				struct nvmet_async_event, entry);
		if (!aen || !ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			break;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		if (status == 0)
			nvmet_set_result(req, nvmet_async_event_result(aen));

		list_del(&aen->entry);
		kfree(aen);

		mutex_unlock(&ctrl->lock);
		trace_nvmet_async_event(ctrl, req->cqe->result.u32);
		nvmet_req_complete(req, status);
	}
}

static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_req *req;

	mutex_lock(&ctrl->lock);
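	/*
	 * Fail all outstanding AER commands; the lock is dropped around each
	 * completion so the transport is never called with ctrl->lock held.
	 */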
	while (ctrl->nr_async_event_cmds) {
		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
		mutex_lock(&ctrl->lock);
	}
	mutex_unlock(&ctrl->lock);
}

static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);

	nvmet_async_events_process(ctrl, 0);
}

void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	u32 i;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
		goto out_unlock;

	for (i = 0; i < ctrl->nr_changed_ns; i++) {
		if (ctrl->changed_ns_list[i] == nsid)
			goto out_unlock;
	}

	if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
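		/*
		 * More namespaces have changed than the Changed Namespace
		 * List log can hold; report the overflow as a single entry
		 * of 0xffffffff as defined by the NVMe specification.
		 */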
		ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
		ctrl->nr_changed_ns = U32_MAX;
		goto out_unlock;
	}

	ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
out_unlock:
	mutex_unlock(&ctrl->lock);
}

void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ctrl *ctrl;

	lockdep_assert_held(&subsys->lock);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
				NVME_AER_NOTICE_NS_CHANGED,
				NVME_LOG_CHANGED_NS);
	}
}

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (port && ctrl->port != port)
			continue;
		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
				NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
	}
	mutex_unlock(&subsys->lock);
}

void nvmet_port_send_ana_event(struct nvmet_port *port)
{
	struct nvmet_subsys_link *p;

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry)
		nvmet_send_ana_event(p->subsys, port);
	up_read(&nvmet_config_sem);
}

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
{
	int ret = 0;

	down_write(&nvmet_config_sem);
	if (nvmet_transports[ops->type])
		ret = -EINVAL;
	else
		nvmet_transports[ops->type] = ops;
	up_write(&nvmet_config_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);

void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
{
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);

void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->port == port)
			ctrl->ops->delete_ctrl(ctrl);
	}
	mutex_unlock(&subsys->lock);
}

int nvmet_enable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;
	int ret;

	lockdep_assert_held(&nvmet_config_sem);

	ops = nvmet_transports[port->disc_addr.trtype];
	if (!ops) {
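		/*
		 * The transport is not registered yet: drop the config
		 * semaphore, try to autoload its module (the module init
		 * path re-takes the semaphore when registering), then
		 * look it up again.
		 */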
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
		if (!ops) {
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);
			return -EINVAL;
		}
	}

	if (!try_module_get(ops->owner))
		return -EINVAL;

	/*
	 * If the user requested PI support and the transport isn't PI capable,
	 * don't enable the port.
	 */
	if (port->pi_enable && !ops->metadata_support) {
		pr_err("T10-PI is not supported by transport type %d\n",
		       port->disc_addr.trtype);
		ret = -EINVAL;
		goto out_put;
	}

	ret = ops->add_port(port);
	if (ret)
		goto out_put;

	/* If the transport didn't set inline_data_size, then disable it. */
	if (port->inline_data_size < 0)
		port->inline_data_size = 0;

	port->enabled = true;
	port->tr_ops = ops;
	return 0;

out_put:
	module_put(ops->owner);
	return ret;
}

void nvmet_disable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;
	port->tr_ops = NULL;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
}

static void nvmet_keep_alive_timer(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);
	bool cmd_seen = ctrl->cmd_seen;

	ctrl->cmd_seen = false;
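	/*
	 * Traffic-based keep-alive: a command seen during the last interval
	 * counts as proof of life, so simply re-arm the timer instead of
	 * treating the host as dead.
	 */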
	if (cmd_seen) {
		pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
			ctrl->cntlid);
		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
		return;
	}

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	nvmet_ctrl_fatal_error(ctrl);
}

static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);
}

static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
		__le32 nsid)
{
	struct nvmet_ns *ns;

	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid == le32_to_cpu(nsid))
			return ns;
	}

	return NULL;
}

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	struct nvmet_ns *ns;

	rcu_read_lock();
	ns = __nvmet_find_namespace(ctrl, nsid);
	if (ns)
		percpu_ref_get(&ns->ref);
	rcu_read_unlock();

	return ns;
}

static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);
}

void nvmet_put_namespace(struct nvmet_ns *ns)
{
	percpu_ref_put(&ns->ref);
}

static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
{
	nvmet_bdev_ns_disable(ns);
	nvmet_file_ns_disable(ns);
}

static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
{
	int ret;
	struct pci_dev *p2p_dev;

	if (!ns->use_p2pmem)
		return 0;

	if (!ns->bdev) {
		pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
		return -EINVAL;
	}

	if (!blk_queue_pci_p2pdma(ns->bdev->bd_queue)) {
		pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
		       ns->device_path);
		return -EINVAL;
	}

	if (ns->p2p_dev) {
		ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
		if (ret < 0)
			return -EINVAL;
	} else {
		/*
		 * Right now we just check that there is p2pmem available so
		 * we can report an error to the user right away if there
		 * is not. We'll find the actual device to use once we set up
		 * the controller when the port's device is available.
		 */

		p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
		if (!p2p_dev) {
			pr_err("no peer-to-peer memory is available for %s\n",
			       ns->device_path);
			return -EINVAL;
		}

		pci_dev_put(p2p_dev);
	}

	return 0;
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
				    struct nvmet_ns *ns)
{
	struct device *clients[2];
	struct pci_dev *p2p_dev;
	int ret;

	if (!ctrl->p2p_client || !ns->use_p2pmem)
		return;

	if (ns->p2p_dev) {
		ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
		if (ret < 0)
			return;

		p2p_dev = pci_dev_get(ns->p2p_dev);
	} else {
		clients[0] = ctrl->p2p_client;
		clients[1] = nvmet_ns_dev(ns);

		p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
		if (!p2p_dev) {
			pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
			       dev_name(ctrl->p2p_client), ns->device_path);
			return;
		}
	}

	ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
	if (ret < 0)
		pci_dev_put(p2p_dev);

	pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
		ns->nsid);
}

void nvmet_ns_revalidate(struct nvmet_ns *ns)
{
	loff_t oldsize = ns->size;

	if (ns->bdev)
		nvmet_bdev_ns_revalidate(ns);
	else
		nvmet_file_ns_revalidate(ns);

	if (oldsize != ns->size)
		nvmet_ns_changed(ns->subsys, ns->nsid);
}

int nvmet_ns_enable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;
	int ret;

	mutex_lock(&subsys->lock);
	ret = 0;
	if (ns->enabled)
		goto out_unlock;

	ret = -EMFILE;
	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
		goto out_unlock;

	ret = nvmet_bdev_ns_enable(ns);
	if (ret == -ENOTBLK)
		ret = nvmet_file_ns_enable(ns);
	if (ret)
		goto out_unlock;

	ret = nvmet_p2pmem_ns_enable(ns);
	if (ret)
		goto out_dev_disable;

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
				0, GFP_KERNEL);
	if (ret)
		goto out_dev_put;

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	/*
	 * The namespaces list needs to be sorted to simplify the implementation
	 * of the Identify Namespace List subcommand.
	 */
	if (list_empty(&subsys->namespaces)) {
		list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
	} else {
		struct nvmet_ns *old;

		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link,
					lockdep_is_held(&subsys->lock)) {
			BUG_ON(ns->nsid == old->nsid);
			if (ns->nsid < old->nsid)
				break;
		}

		list_add_tail_rcu(&ns->dev_link, &old->dev_link);
	}
	subsys->nr_namespaces++;

	nvmet_ns_changed(subsys, ns->nsid);
	ns->enabled = true;
	ret = 0;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
out_dev_put:
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
out_dev_disable:
	nvmet_ns_dev_disable(ns);
	goto out_unlock;
}

void nvmet_ns_disable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	if (!ns->enabled)
		goto out_unlock;

	ns->enabled = false;
	list_del_rcu(&ns->dev_link);
	if (ns->nsid == subsys->max_nsid)
		subsys->max_nsid = nvmet_max_nsid(subsys);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));

	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespaces from the lookup list, we
	 * can kill the percpu ref and wait for any remaining references
	 * to be dropped, as well as an RCU grace period for anyone only
	 * using the namespace under rcu_read_lock().  Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	synchronize_rcu();
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	mutex_lock(&subsys->lock);

	subsys->nr_namespaces--;
	nvmet_ns_changed(subsys, ns->nsid);
	nvmet_ns_dev_disable(ns);
out_unlock:
	mutex_unlock(&subsys->lock);
}

void nvmet_ns_free(struct nvmet_ns *ns)
{
	nvmet_ns_disable(ns);

	down_write(&nvmet_ana_sem);
	nvmet_ana_group_enabled[ns->anagrpid]--;
	up_write(&nvmet_ana_sem);

	kfree(ns->device_path);
	kfree(ns);
}

struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ns *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;

	INIT_LIST_HEAD(&ns->dev_link);
	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;

	down_write(&nvmet_ana_sem);
	ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
	nvmet_ana_group_enabled[ns->anagrpid]++;
	up_write(&nvmet_ana_sem);

	uuid_gen(&ns->uuid);
	ns->buffered_io = false;

	return ns;
}

static void nvmet_update_sq_head(struct nvmet_req *req)
{
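	/*
	 * Advance the SQ head lock-free: concurrent completions may race,
	 * so publish the new head with cmpxchg() and retry until it wins.
	 */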
	if (req->sq->size) {
		u32 old_sqhd, new_sqhd;

		do {
			old_sqhd = req->sq->sqhd;
			new_sqhd = (old_sqhd + 1) % req->sq->size;
		} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
					old_sqhd);
	}
	req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
}

static void nvmet_set_error(struct nvmet_req *req, u16 status)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_error_slot *new_error_slot;
	unsigned long flags;

	req->cqe->status = cpu_to_le16(status << 1);

	if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
		return;

	spin_lock_irqsave(&ctrl->error_lock, flags);
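	/* record this failure in the controller's circular error log */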
	ctrl->err_counter++;
	new_error_slot =
		&ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];

	new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
	new_error_slot->sqid = cpu_to_le16(req->sq->qid);
	new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
	new_error_slot->status_field = cpu_to_le16(status << 1);
	new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
	new_error_slot->lba = cpu_to_le64(req->error_slba);
	new_error_slot->nsid = req->cmd->common.nsid;
	spin_unlock_irqrestore(&ctrl->error_lock, flags);

	/* set the more bit for this request */
	req->cqe->status |= cpu_to_le16(1 << 14);
}

static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	if (!req->sq->sqhd_disabled)
		nvmet_update_sq_head(req);
	req->cqe->sq_id = cpu_to_le16(req->sq->qid);
	req->cqe->command_id = req->cmd->common.command_id;

	if (unlikely(status))
		nvmet_set_error(req, status);

	trace_nvmet_req_complete(req);

	if (req->ns)
		nvmet_put_namespace(req->ns);
	req->ops->queue_response(req);
}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	__nvmet_req_complete(req, status);
	percpu_ref_put(&req->sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{
	cq->qid = qid;
	cq->size = size;

	ctrl->cqs[qid] = cq;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{
	sq->sqhd = 0;
	sq->qid = qid;
	sq->size = size;

	ctrl->sqs[qid] = sq;
}

static void nvmet_confirm_sq(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->confirm_done);
}

void nvmet_sq_destroy(struct nvmet_sq *sq)
{
	u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
	struct nvmet_ctrl *ctrl = sq->ctrl;

	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq) {
		nvmet_async_events_process(ctrl, status);
		nvmet_async_events_free(ctrl);
	}
	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
	wait_for_completion(&sq->confirm_done);
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);

	if (ctrl) {
		nvmet_ctrl_put(ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
	}
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);

static void nvmet_sq_free(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->free_done);
}

int nvmet_sq_init(struct nvmet_sq *sq)
{
	int ret;

	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
	if (ret) {
		pr_err("percpu_ref init failed!\n");
		return ret;
	}
	init_completion(&sq->free_done);
	init_completion(&sq->confirm_done);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);

static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
		struct nvmet_ns *ns)
{
	enum nvme_ana_state state = port->ana_state[ns->anagrpid];

	if (unlikely(state == NVME_ANA_INACCESSIBLE))
		return NVME_SC_ANA_INACCESSIBLE;
	if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
		return NVME_SC_ANA_PERSISTENT_LOSS;
	if (unlikely(state == NVME_ANA_CHANGE))
		return NVME_SC_ANA_TRANSITION;
	return 0;
}

static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
{
	if (unlikely(req->ns->readonly)) {
		switch (req->cmd->common.opcode) {
		case nvme_cmd_read:
		case nvme_cmd_flush:
			break;
		default:
			return NVME_SC_NS_WRITE_PROTECTED;
		}
	}

	return 0;
}

static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
	if (unlikely(!req->ns)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return NVME_SC_INVALID_NS | NVME_SC_DNR;
	}
	ret = nvmet_check_ana_state(req->port, req->ns);
	if (unlikely(ret)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return ret;
	}
	ret = nvmet_io_cmd_check_access(req);
	if (unlikely(ret)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return ret;
	}

	if (req->ns->file)
		return nvmet_file_parse_io_cmd(req);
	else
		return nvmet_bdev_parse_io_cmd(req);
}

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
{
	u8 flags = req->cmd->common.flags;
	u16 status;

	req->cq = cq;
	req->sq = sq;
	req->ops = ops;
	req->sg = NULL;
	req->metadata_sg = NULL;
	req->sg_cnt = 0;
	req->metadata_sg_cnt = 0;
	req->transfer_len = 0;
	req->metadata_len = 0;
	req->cqe->status = 0;
	req->cqe->sq_head = 0;
	req->ns = NULL;
	req->error_loc = NVMET_NO_ERROR_LOC;
	req->error_slba = 0;

	trace_nvmet_req_init(req, req->cmd);

	/* no support for fused commands yet */
	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
		req->error_loc = offsetof(struct nvme_common_command, flags);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	/*
	 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
	 * contains an address of a single contiguous physical buffer that is
	 * byte aligned.
	 */
	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
		req->error_loc = offsetof(struct nvme_common_command, flags);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (unlikely(!req->sq->ctrl))
		/* will return an error for any non-connect command: */
		status = nvmet_parse_connect_cmd(req);
	else if (likely(req->sq->qid != 0))
		status = nvmet_parse_io_cmd(req);
	else
		status = nvmet_parse_admin_cmd(req);

	if (status)
		goto fail;

	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (sq->ctrl)
		sq->ctrl->cmd_seen = true;

	return true;

fail:
	__nvmet_req_complete(req, status);
	return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);

void nvmet_req_uninit(struct nvmet_req *req)
{
	percpu_ref_put(&req->sq->ref);
	if (req->ns)
		nvmet_put_namespace(req->ns);
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);

bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
{
	if (unlikely(len != req->transfer_len)) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);

bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
{
	if (unlikely(data_len > req->transfer_len)) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
		return false;
	}

	return true;
}

static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
{
	return req->transfer_len - req->metadata_len;
}

static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req)
{
	req->sg = pci_p2pmem_alloc_sgl(req->p2p_dev, &req->sg_cnt,
			nvmet_data_transfer_len(req));
	if (!req->sg)
		goto out_err;

	if (req->metadata_len) {
		req->metadata_sg = pci_p2pmem_alloc_sgl(req->p2p_dev,
				&req->metadata_sg_cnt, req->metadata_len);
		if (!req->metadata_sg)
			goto out_free_sg;
	}
	return 0;
out_free_sg:
	pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
out_err:
	return -ENOMEM;
}

static bool nvmet_req_find_p2p_dev(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_PCI_P2PDMA))
		return false;

	if (req->sq->ctrl && req->sq->qid && req->ns) {
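		/*
		 * Only I/O queue (qid != 0) commands that target a namespace
		 * may use the P2P device recorded for that namespace.
		 */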
		req->p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
						 req->ns->nsid);
		if (req->p2p_dev)
			return true;
	}

	req->p2p_dev = NULL;
	return false;
}

int nvmet_req_alloc_sgls(struct nvmet_req *req)
{
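	/* prefer P2P memory when a P2P device was set up for this namespace */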
	if (nvmet_req_find_p2p_dev(req) && !nvmet_req_alloc_p2pmem_sgls(req))
		return 0;

	req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
			    &req->sg_cnt);
	if (unlikely(!req->sg))
		goto out;

	if (req->metadata_len) {
		req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
					     &req->metadata_sg_cnt);
		if (unlikely(!req->metadata_sg))
			goto out_free;
	}

	return 0;
out_free:
	sgl_free(req->sg);
out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);

void nvmet_req_free_sgls(struct nvmet_req *req)
{
	if (req->p2p_dev) {
		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
		if (req->metadata_sg)
			pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
	} else {
		sgl_free(req->sg);
		if (req->metadata_sg)
			sgl_free(req->metadata_sg);
	}

	req->sg = NULL;
	req->metadata_sg = NULL;
	req->sg_cnt = 0;
	req->metadata_sg_cnt = 0;
}
EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);

static inline bool nvmet_cc_en(u32 cc)
{
	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
}

static inline u8 nvmet_cc_css(u32 cc)
{
	return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
	return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
	return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
	return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
	return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
	return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
}

static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
	    nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
	    nvmet_cc_mps(ctrl->cc) != 0 ||
	    nvmet_cc_ams(ctrl->cc) != 0 ||
	    nvmet_cc_css(ctrl->cc) != 0) {
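		/*
		 * Only 64/16 byte SQ/CQ entries, a 4K memory page size,
		 * round-robin arbitration and the NVM command set are
		 * supported; anything else is a fatal controller error.
		 */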
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	ctrl->csts = NVME_CSTS_RDY;

	/*
	 * Controllers that are not yet enabled should not really enforce the
	 * keep alive timeout, but we still want to track a timeout and clean up
	 * in case a host died before it enabled the controller.  Hence, simply
	 * reset the keep alive timer when the controller is enabled.
	 */
	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/* XXX: tear down queues? */
	ctrl->csts &= ~NVME_CSTS_RDY;
	ctrl->cc = 0;
}

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{
	u32 old;

	mutex_lock(&ctrl->lock);
	old = ctrl->cc;
	ctrl->cc = new;

	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
		nvmet_start_ctrl(ctrl);
	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
		nvmet_clear_ctrl(ctrl);
	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
		nvmet_clear_ctrl(ctrl);
		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
	}
	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
	mutex_unlock(&ctrl->lock);
}

static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
	/* command sets supported: NVMe command set: */
	ctrl->cap = (1ULL << 37);
	/* CC.EN timeout in 500msec units: */
	ctrl->cap |= (15ULL << 24);
	/* maximum queue entries supported: */
	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
}

u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	u16 status = 0;

	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	}

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->cntlid == cntlid) {
			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
				pr_warn("hostnqn mismatch.\n");
				continue;
			}
			if (!kref_get_unless_zero(&ctrl->ref))
				continue;

			*ret = ctrl;
			goto out;
		}
	}

	pr_warn("could not find controller %d for subsys %s / host %s\n",
		cntlid, subsysnqn, hostnqn);
	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;

out:
	mutex_unlock(&subsys->lock);
	nvmet_subsys_put(subsys);
	return status;
}

u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
{
	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
		pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
		       cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
		       cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}
	return 0;
}

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
{
	struct nvmet_host_link *p;

	lockdep_assert_held(&nvmet_config_sem);

	if (subsys->allow_any_host)
		return true;

	if (subsys->type == NVME_NQN_DISC) /* allow all access to disc subsys */
		return true;

	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), hostnqn))
			return true;
	}

	return false;
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
		struct nvmet_req *req)
{
	struct nvmet_ns *ns;

	if (!req->p2p_client)
		return;

	ctrl->p2p_client = get_device(req->p2p_client);

	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link,
				lockdep_is_held(&ctrl->subsys->lock))
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
		pci_dev_put(radix_tree_deref_slot(slot));

	put_device(ctrl->p2p_client);
}

static void nvmet_fatal_error_handler(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
			container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
}

u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	int ret;
	u16 status;

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		goto out;
	}

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	down_read(&nvmet_config_sem);
	if (!nvmet_host_allowed(subsys, hostnqn)) {
		pr_info("connect by host %s for subsystem %s not allowed\n",
			hostnqn, subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
		up_read(&nvmet_config_sem);
		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
		goto out_put_subsystem;
	}
	up_read(&nvmet_config_sem);

	status = NVME_SC_INTERNAL;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto out_put_subsystem;
	mutex_init(&ctrl->lock);

	nvmet_init_cap(ctrl);

	ctrl->port = req->port;

	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
	INIT_LIST_HEAD(&ctrl->async_events);
	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);

	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

	kref_init(&ctrl->ref);
	ctrl->subsys = subsys;
	WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);

	ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
			sizeof(__le32), GFP_KERNEL);
	if (!ctrl->changed_ns_list)
		goto out_free_ctrl;

	ctrl->cqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_cq *),
			GFP_KERNEL);
	if (!ctrl->cqs)
		goto out_free_changed_ns_list;

	ctrl->sqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_sq *),
			GFP_KERNEL);
	if (!ctrl->sqs)
		goto out_free_cqs;

	if (subsys->cntlid_min > subsys->cntlid_max)
		goto out_free_cqs;

	ret = ida_simple_get(&cntlid_ida,
			     subsys->cntlid_min, subsys->cntlid_max,
			     GFP_KERNEL);
	if (ret < 0) {
		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
		goto out_free_sqs;
	}
	ctrl->cntlid = ret;

	ctrl->ops = req->ops;

	/*
	 * Discovery controllers may use some arbitrary high value
	 * in order to clean up stale discovery sessions
	 */
	if ((ctrl->subsys->type == NVME_NQN_DISC) && !kato)
		kato = NVMET_DISC_KATO_MS;

	/* keep-alive timeout in seconds */
	ctrl->kato = DIV_ROUND_UP(kato, 1000);

	ctrl->err_counter = 0;
	spin_lock_init(&ctrl->error_lock);

	nvmet_start_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	nvmet_setup_p2p_ns_map(ctrl, req);
	mutex_unlock(&subsys->lock);

	*ctrlp = ctrl;
	return 0;

out_free_sqs:
	kfree(ctrl->sqs);
out_free_cqs:
	kfree(ctrl->cqs);
out_free_changed_ns_list:
	kfree(ctrl->changed_ns_list);
out_free_ctrl:
	kfree(ctrl);
out_put_subsystem:
	nvmet_subsys_put(subsys);
out:
	return status;
}

static void nvmet_ctrl_free(struct kref *ref)
{
	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
	struct nvmet_subsys *subsys = ctrl->subsys;

	mutex_lock(&subsys->lock);
	nvmet_release_p2p_ns_map(ctrl);
	list_del(&ctrl->subsys_entry);
	mutex_unlock(&subsys->lock);

	nvmet_stop_keep_alive_timer(ctrl);

	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fatal_err_work);

	ida_simple_remove(&cntlid_ida, ctrl->cntlid);

	kfree(ctrl->sqs);
	kfree(ctrl->cqs);
	kfree(ctrl->changed_ns_list);
	kfree(ctrl);

	nvmet_subsys_put(subsys);
}

void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvmet_ctrl_free);
}

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
	mutex_lock(&ctrl->lock);
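	/* set CFS and schedule the fatal error handler only once */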
	if (!(ctrl->csts & NVME_CSTS_CFS)) {
		ctrl->csts |= NVME_CSTS_CFS;
		schedule_work(&ctrl->fatal_err_work);
	}
	mutex_unlock(&ctrl->lock);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn)
{
	struct nvmet_subsys_link *p;

	if (!port)
		return NULL;

	if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
			return NULL;
		return nvmet_disc_subsys;
	}

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
				NVMF_NQN_SIZE)) {
			if (!kref_get_unless_zero(&p->subsys->ref))
				break;
			up_read(&nvmet_config_sem);
			return p->subsys;
		}
	}
	up_read(&nvmet_config_sem);
	return NULL;
}

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type)
{
	struct nvmet_subsys *subsys;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return ERR_PTR(-ENOMEM);

	subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
	/* generate a random serial number as our controllers are ephemeral: */
	get_random_bytes(&subsys->serial, sizeof(subsys->serial));

	switch (type) {
	case NVME_NQN_NVME:
		subsys->max_qid = NVMET_NR_QUEUES;
		break;
	case NVME_NQN_DISC:
		subsys->max_qid = 0;
		break;
	default:
		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
		kfree(subsys);
		return ERR_PTR(-EINVAL);
	}
	subsys->type = type;
	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
			GFP_KERNEL);
	if (!subsys->subsysnqn) {
		kfree(subsys);
		return ERR_PTR(-ENOMEM);
	}
	subsys->cntlid_min = NVME_CNTLID_MIN;
	subsys->cntlid_max = NVME_CNTLID_MAX;
	kref_init(&subsys->ref);

	mutex_init(&subsys->lock);
	INIT_LIST_HEAD(&subsys->namespaces);
	INIT_LIST_HEAD(&subsys->ctrls);
	INIT_LIST_HEAD(&subsys->hosts);

	return subsys;
}

static void nvmet_subsys_free(struct kref *ref)
{
	struct nvmet_subsys *subsys =
		container_of(ref, struct nvmet_subsys, ref);

	WARN_ON_ONCE(!list_empty(&subsys->namespaces));

	kfree(subsys->subsysnqn);
	kfree_rcu(subsys->model, rcuhead);
	kfree(subsys);
}

void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	mutex_unlock(&subsys->lock);
}

void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
	kref_put(&subsys->ref, nvmet_subsys_free);
}

static int __init nvmet_init(void)
{
	int error;

	nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;

	buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
			WQ_MEM_RECLAIM, 0);
	if (!buffered_io_wq) {
		error = -ENOMEM;
		goto out;
	}

	error = nvmet_init_discovery();
	if (error)
		goto out_free_work_queue;

	error = nvmet_init_configfs();
	if (error)
		goto out_exit_discovery;
	return 0;

out_exit_discovery:
	nvmet_exit_discovery();
out_free_work_queue:
	destroy_workqueue(buffered_io_wq);
out:
	return error;
}

static void __exit nvmet_exit(void)
{
	nvmet_exit_configfs();
	nvmet_exit_discovery();
	ida_destroy(&cntlid_ida);
	destroy_workqueue(buffered_io_wq);

	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}

module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_LICENSE("GPL v2");