core.c 130.3 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0
2 3 4 5 6 7 8
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
9
#include <linux/blk-integrity.h>
10
#include <linux/compat.h>
11
#include <linux/delay.h>
12
#include <linux/errno.h>
13
#include <linux/hdreg.h>
14
#include <linux/kernel.h>
15
#include <linux/module.h>
16
#include <linux/backing-dev.h>
17 18
#include <linux/slab.h>
#include <linux/types.h>
19 20 21
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
22
#include <linux/pm_qos.h>
23
#include <asm/unaligned.h>
24 25

#include "nvme.h"
S
Sagi Grimberg 已提交
26
#include "fabrics.h"
27

H
Hannes Reinecke 已提交
28 29 30
#define CREATE_TRACE_POINTS
#include "trace.h"

31 32
#define NVME_MINORS		(1U << MINORBITS)

33 34
unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
35
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
36
EXPORT_SYMBOL_GPL(admin_timeout);
37

38 39
unsigned int nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
40
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
41
EXPORT_SYMBOL_GPL(nvme_io_timeout);
42

43
static unsigned char shutdown_timeout = 5;
44 45 46
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

47 48
static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
K
Keith Busch 已提交
49
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
50

51
static unsigned long default_ps_max_latency_us = 100000;
52 53 54 55
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

56 57 58 59
static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79
static unsigned long apst_primary_timeout_ms = 100;
module_param(apst_primary_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(apst_primary_timeout_ms,
	"primary APST timeout in ms");

static unsigned long apst_secondary_timeout_ms = 2000;
module_param(apst_secondary_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(apst_secondary_timeout_ms,
	"secondary APST timeout in ms");

static unsigned long apst_primary_latency_tol_us = 15000;
module_param(apst_primary_latency_tol_us, ulong, 0644);
MODULE_PARM_DESC(apst_primary_latency_tol_us,
	"primary APST latency tolerance in us");

static unsigned long apst_secondary_latency_tol_us = 100000;
module_param(apst_secondary_latency_tol_us, ulong, 0644);
MODULE_PARM_DESC(apst_secondary_latency_tol_us,
	"secondary APST latency tolerance in us");

80 81 82 83 84
/*
 * nvme_wq - hosts nvme related works that are not reset or delete
 * nvme_reset_wq - hosts nvme reset works
 * nvme_delete_wq - hosts nvme delete works
 *
85 86
 * nvme_wq will host works such as scan, aen handling, fw activation,
 * keep-alive, periodic reconnects etc. nvme_reset_wq
87 88 89 90
 * runs reset works which also flush works hosted on nvme_wq for
 * serialization purposes. nvme_delete_wq host controller deletion
 * works which flush reset works for serialization.
 */
91 92 93
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

94 95 96 97 98 99
struct workqueue_struct *nvme_reset_wq;
EXPORT_SYMBOL_GPL(nvme_reset_wq);

struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);

C
Christoph Hellwig 已提交
100 101
static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);
102

103
static DEFINE_IDA(nvme_instance_ida);
104
static dev_t nvme_ctrl_base_chr_devt;
105
static struct class *nvme_class;
C
Christoph Hellwig 已提交
106
static struct class *nvme_subsys_class;
107

108 109 110 111
static DEFINE_IDA(nvme_ns_chr_minor_ida);
static dev_t nvme_ns_chr_devt;
static struct class *nvme_ns_chr_class;

112
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
113 114
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					   unsigned nsid);
115 116
static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
				   struct nvme_command *cmd);
117

118
void nvme_queue_scan(struct nvme_ctrl *ctrl)
119 120 121 122
{
	/*
	 * Only new queue scan work when admin and IO queues are both alive
	 */
K
Keith Busch 已提交
123
	if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
124 125 126
		queue_work(nvme_wq, &ctrl->scan_work);
}

127 128 129 130 131 132
/*
 * Use this function to proceed with scheduling reset_work for a controller
 * that had previously been set to the resetting state. This is intended for
 * code paths that can't be interrupted by other reset attempts. A hot removal
 * may prevent this from succeeding.
 */
133
int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
134 135 136 137 138 139 140
{
	if (ctrl->state != NVME_CTRL_RESETTING)
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
141
EXPORT_SYMBOL_GPL(nvme_try_sched_reset);
142

143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174
static void nvme_failfast_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, failfast_work);

	if (ctrl->state != NVME_CTRL_CONNECTING)
		return;

	set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
	dev_info(ctrl->device, "failfast expired\n");
	nvme_kick_requeue_lists(ctrl);
}

static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1)
		return;

	schedule_delayed_work(&ctrl->failfast_work,
			      ctrl->opts->fast_io_fail_tmo * HZ);
}

static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts)
		return;

	cancel_delayed_work_sync(&ctrl->failfast_work);
	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
}


175 176 177 178
int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
179
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
180 181 182 183 184
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);

185
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
186 187 188 189
{
	int ret;

	ret = nvme_reset_ctrl(ctrl);
190
	if (!ret) {
191
		flush_work(&ctrl->reset_work);
K
Keith Busch 已提交
192
		if (ctrl->state != NVME_CTRL_LIVE)
193 194 195
			ret = -ENETRESET;
	}

196 197 198
	return ret;
}

199
static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
200
{
201
	dev_info(ctrl->device,
202
		 "Removing ctrl: NQN \"%s\"\n", nvmf_ctrl_subsysnqn(ctrl));
203

204
	flush_work(&ctrl->reset_work);
205 206
	nvme_stop_ctrl(ctrl);
	nvme_remove_namespaces(ctrl);
207
	ctrl->ops->delete_ctrl(ctrl);
208
	nvme_uninit_ctrl(ctrl);
209 210
}

211 212 213 214 215 216 217 218
static void nvme_delete_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, delete_work);

	nvme_do_delete_ctrl(ctrl);
}

219 220 221 222
int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		return -EBUSY;
223
	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
224 225 226 227 228
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);

229
static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
230 231
{
	/*
232 233
	 * Keep a reference until nvme_do_delete_ctrl() complete,
	 * since ->delete_ctrl can free the controller.
234 235
	 */
	nvme_get_ctrl(ctrl);
236
	if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
237
		nvme_do_delete_ctrl(ctrl);
238 239 240
	nvme_put_ctrl(ctrl);
}

241
static blk_status_t nvme_error_status(u16 status)
242
{
243
	switch (status & 0x7ff) {
244
	case NVME_SC_SUCCESS:
245
		return BLK_STS_OK;
246
	case NVME_SC_CAP_EXCEEDED:
247
		return BLK_STS_NOSPC;
248
	case NVME_SC_LBA_RANGE:
249 250
	case NVME_SC_CMD_INTERRUPTED:
	case NVME_SC_NS_NOT_READY:
251 252
		return BLK_STS_TARGET;
	case NVME_SC_BAD_ATTRIBUTES:
253
	case NVME_SC_ONCS_NOT_SUPPORTED:
254 255 256
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
257
		return BLK_STS_NOTSUPP;
258 259 260
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
261 262
	case NVME_SC_ACCESS_DENIED:
	case NVME_SC_READ_ONLY:
263
	case NVME_SC_COMPARE_FAILED:
264
		return BLK_STS_MEDIUM;
265 266 267 268 269 270 271
	case NVME_SC_GUARD_CHECK:
	case NVME_SC_APPTAG_CHECK:
	case NVME_SC_REFTAG_CHECK:
	case NVME_SC_INVALID_PI:
		return BLK_STS_PROTECTION;
	case NVME_SC_RESERVATION_CONFLICT:
		return BLK_STS_NEXUS;
272 273
	case NVME_SC_HOST_PATH_ERROR:
		return BLK_STS_TRANSPORT;
K
Keith Busch 已提交
274 275 276 277
	case NVME_SC_ZONE_TOO_MANY_ACTIVE:
		return BLK_STS_ZONE_ACTIVE_RESOURCE;
	case NVME_SC_ZONE_TOO_MANY_OPEN:
		return BLK_STS_ZONE_OPEN_RESOURCE;
278 279
	default:
		return BLK_STS_IOERR;
280 281 282
	}
}

283 284 285 286 287 288 289
static void nvme_retry_req(struct request *req)
{
	unsigned long delay = 0;
	u16 crd;

	/* The mask and shift result must be <= 3 */
	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
290 291
	if (crd)
		delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;
292 293 294 295 296 297

	nvme_req(req)->retries++;
	blk_mq_requeue_request(req, false);
	blk_mq_delay_kick_requeue_list(req->q, delay);
}

A
Alan Adamson 已提交
298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328
static void nvme_log_error(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	struct nvme_request *nr = nvme_req(req);

	if (ns) {
		pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %llu blocks, %s (sct 0x%x / sc 0x%x) %s%s\n",
		       ns->disk ? ns->disk->disk_name : "?",
		       nvme_get_opcode_str(nr->cmd->common.opcode),
		       nr->cmd->common.opcode,
		       (unsigned long long)nvme_sect_to_lba(ns, blk_rq_pos(req)),
		       (unsigned long long)blk_rq_bytes(req) >> ns->lba_shift,
		       nvme_get_error_status_str(nr->status),
		       nr->status >> 8 & 7,	/* Status Code Type */
		       nr->status & 0xff,	/* Status Code */
		       nr->status & NVME_SC_MORE ? "MORE " : "",
		       nr->status & NVME_SC_DNR  ? "DNR "  : "");
		return;
	}

	pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s\n",
			   dev_name(nr->ctrl->device),
			   nvme_get_admin_opcode_str(nr->cmd->common.opcode),
			   nr->cmd->common.opcode,
			   nvme_get_error_status_str(nr->status),
			   nr->status >> 8 & 7,	/* Status Code Type */
			   nr->status & 0xff,	/* Status Code */
			   nr->status & NVME_SC_MORE ? "MORE " : "",
			   nr->status & NVME_SC_DNR  ? "DNR "  : "");
}

329 330 331 332 333 334 335
enum nvme_disposition {
	COMPLETE,
	RETRY,
	FAILOVER,
};

static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
336
{
337 338
	if (likely(nvme_req(req)->status == 0))
		return COMPLETE;
339

340 341 342 343
	if (blk_noretry_request(req) ||
	    (nvme_req(req)->status & NVME_SC_DNR) ||
	    nvme_req(req)->retries >= nvme_max_retries)
		return COMPLETE;
344

345
	if (req->cmd_flags & REQ_NVME_MPATH) {
346 347
		if (nvme_is_path_error(nvme_req(req)->status) ||
		    blk_queue_dying(req->q))
348
			return FAILOVER;
349 350 351
	} else {
		if (blk_queue_dying(req->q))
			return COMPLETE;
352
	}
353

354 355
	return RETRY;
}
356

357
static inline void nvme_end_req_zoned(struct request *req)
358 359 360
{
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    req_op(req) == REQ_OP_ZONE_APPEND)
K
Keith Busch 已提交
361 362
		req->__sector = nvme_lba_to_sect(req->q->queuedata,
			le64_to_cpu(nvme_req(req)->result.u64));
363 364 365 366 367
}

static inline void nvme_end_req(struct request *req)
{
	blk_status_t status = nvme_error_status(nvme_req(req)->status);
H
Hannes Reinecke 已提交
368

369
	if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET)))
A
Alan Adamson 已提交
370
		nvme_log_error(req);
371
	nvme_end_req_zoned(req);
372
	nvme_trace_bio_complete(req);
373
	blk_mq_end_request(req, status);
374
}
375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395

void nvme_complete_rq(struct request *req)
{
	trace_nvme_complete_rq(req);
	nvme_cleanup_cmd(req);

	if (nvme_req(req)->ctrl->kas)
		nvme_req(req)->ctrl->comp_seen = true;

	switch (nvme_decide_disposition(req)) {
	case COMPLETE:
		nvme_end_req(req);
		return;
	case RETRY:
		nvme_retry_req(req);
		return;
	case FAILOVER:
		nvme_failover_req(req);
		return;
	}
}
396 397
EXPORT_SYMBOL_GPL(nvme_complete_rq);

398 399
void nvme_complete_batch_req(struct request *req)
{
400
	trace_nvme_complete_rq(req);
401 402 403 404 405
	nvme_cleanup_cmd(req);
	nvme_end_req_zoned(req);
}
EXPORT_SYMBOL_GPL(nvme_complete_batch_req);

406 407 408 409 410 411 412 413 414 415 416 417 418 419 420
/*
 * Called to unwind from ->queue_rq on a failed command submission so that the
 * multipathing code gets called to potentially failover to another path.
 * The caller needs to unwind all transport specific resource allocations and
 * must return propagate the return value.
 */
blk_status_t nvme_host_path_error(struct request *req)
{
	nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
	blk_mq_set_request_complete(req);
	nvme_complete_rq(req);
	return BLK_STS_OK;
}
EXPORT_SYMBOL_GPL(nvme_host_path_error);

421
bool nvme_cancel_request(struct request *req, void *data, bool reserved)
422 423 424 425
{
	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

426 427 428 429
	/* don't abort one completed request */
	if (blk_mq_request_completed(req))
		return true;

430
	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
431
	nvme_req(req)->flags |= NVME_REQ_CANCELLED;
432
	blk_mq_complete_request(req);
433
	return true;
434 435 436
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);

C
Chao Leng 已提交
437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456
void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->tagset) {
		blk_mq_tagset_busy_iter(ctrl->tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_tagset);

void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->admin_tagset) {
		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);

457 458 459
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
460
	enum nvme_ctrl_state old_state;
461
	unsigned long flags;
462 463
	bool changed = false;

464
	spin_lock_irqsave(&ctrl->lock, flags);
465 466

	old_state = ctrl->state;
467 468 469
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
470
		case NVME_CTRL_NEW:
471
		case NVME_CTRL_RESETTING:
472
		case NVME_CTRL_CONNECTING:
473
			changed = true;
474
			fallthrough;
475 476 477 478 479 480 481
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
482 483
		case NVME_CTRL_LIVE:
			changed = true;
484
			fallthrough;
485 486 487 488
		default:
			break;
		}
		break;
489
	case NVME_CTRL_CONNECTING:
490
		switch (old_state) {
491
		case NVME_CTRL_NEW:
492
		case NVME_CTRL_RESETTING:
493
			changed = true;
494
			fallthrough;
495 496 497 498 499 500 501 502
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
503
		case NVME_CTRL_CONNECTING:
504
			changed = true;
505
			fallthrough;
506 507 508 509
		default:
			break;
		}
		break;
510 511 512 513 514
	case NVME_CTRL_DELETING_NOIO:
		switch (old_state) {
		case NVME_CTRL_DELETING:
		case NVME_CTRL_DEAD:
			changed = true;
515
			fallthrough;
516 517 518 519
		default:
			break;
		}
		break;
520 521 522 523
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
524
			fallthrough;
525 526 527 528
		default:
			break;
		}
		break;
529 530 531 532
	default:
		break;
	}

533
	if (changed) {
534
		ctrl->state = new_state;
535 536
		wake_up_all(&ctrl->state_wq);
	}
537

538
	spin_unlock_irqrestore(&ctrl->lock, flags);
539 540 541 542 543 544
	if (!changed)
		return false;

	if (ctrl->state == NVME_CTRL_LIVE) {
		if (old_state == NVME_CTRL_CONNECTING)
			nvme_stop_failfast_work(ctrl);
545
		nvme_kick_requeue_lists(ctrl);
546 547 548 549
	} else if (ctrl->state == NVME_CTRL_CONNECTING &&
		old_state == NVME_CTRL_RESETTING) {
		nvme_start_failfast_work(ctrl);
	}
550 551 552 553
	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);

554 555 556 557 558 559 560 561 562 563 564 565
/*
 * Returns true for sink states that can't ever transition back to live.
 */
static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
{
	switch (ctrl->state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	case NVME_CTRL_DELETING:
566
	case NVME_CTRL_DELETING_NOIO:
567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587
	case NVME_CTRL_DEAD:
		return true;
	default:
		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
		return true;
	}
}

/*
 * Waits for the controller state to be resetting, or returns false if it is
 * not possible to ever transition to that state.
 */
bool nvme_wait_reset(struct nvme_ctrl *ctrl)
{
	wait_event(ctrl->state_wq,
		   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
		   nvme_state_terminal(ctrl));
	return ctrl->state == NVME_CTRL_RESETTING;
}
EXPORT_SYMBOL_GPL(nvme_wait_reset);

C
Christoph Hellwig 已提交
588 589 590 591 592
static void nvme_free_ns_head(struct kref *ref)
{
	struct nvme_ns_head *head =
		container_of(ref, struct nvme_ns_head, ref);

593
	nvme_mpath_remove_disk(head);
594
	ida_free(&head->subsys->ns_ida, head->instance);
595
	cleanup_srcu_struct(&head->srcu);
596
	nvme_put_subsystem(head->subsys);
C
Christoph Hellwig 已提交
597 598 599
	kfree(head);
}

600
bool nvme_tryget_ns_head(struct nvme_ns_head *head)
601 602 603 604
{
	return kref_get_unless_zero(&head->ref);
}

605
void nvme_put_ns_head(struct nvme_ns_head *head)
C
Christoph Hellwig 已提交
606 607 608 609
{
	kref_put(&head->ref, nvme_free_ns_head);
}

610 611 612 613 614
static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	put_disk(ns->disk);
C
Christoph Hellwig 已提交
615
	nvme_put_ns_head(ns->head);
616
	nvme_put_ctrl(ns->ctrl);
617 618 619
	kfree(ns);
}

K
Kanchan Joshi 已提交
620 621 622 623 624
static inline bool nvme_get_ns(struct nvme_ns *ns)
{
	return kref_get_unless_zero(&ns->kref);
}

625
void nvme_put_ns(struct nvme_ns *ns)
626 627 628
{
	kref_put(&ns->kref, nvme_free_ns);
}
629
EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);
630

631 632
static inline void nvme_clear_nvme_request(struct request *req)
{
633
	nvme_req(req)->status = 0;
634 635 636
	nvme_req(req)->retries = 0;
	nvme_req(req)->flags = 0;
	req->rq_flags |= RQF_DONTPREP;
637 638
}

639 640
/* initialize a passthrough request */
void nvme_init_request(struct request *req, struct nvme_command *cmd)
641
{
642 643 644
	if (req->q->queuedata)
		req->timeout = NVME_IO_TIMEOUT;
	else /* no queuedata implies admin queue */
645
		req->timeout = NVME_ADMIN_TIMEOUT;
646

647 648 649
	/* passthru commands should let the driver set the SGL flags */
	cmd->common.flags &= ~NVME_CMD_SGL_ALL;

650
	req->cmd_flags |= REQ_FAILFAST_DRIVER;
651
	if (req->mq_hctx->type == HCTX_TYPE_POLL)
652
		req->cmd_flags |= REQ_POLLED;
653
	nvme_clear_nvme_request(req);
654
	memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
655
}
656
EXPORT_SYMBOL_GPL(nvme_init_request);
657

658 659 660 661 662 663 664 665 666 667 668 669 670
/*
 * For something we're not in a state to send to the device the default action
 * is to busy it and retry it after the controller state is recovered.  However,
 * if the controller is deleting or if anything is marked for failfast or
 * nvme multipath it is immediately failed.
 *
 * Note: commands used to initialize the controller will be marked for failfast.
 * Note: nvme cli/ioctl commands are marked for failfast.
 */
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *rq)
{
	if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
671
	    ctrl->state != NVME_CTRL_DELETING &&
672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718
	    ctrl->state != NVME_CTRL_DEAD &&
	    !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
		return BLK_STS_RESOURCE;
	return nvme_host_path_error(rq);
}
EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);

bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	struct nvme_request *req = nvme_req(rq);

	/*
	 * currently we have a problem sending passthru commands
	 * on the admin_q if the controller is not LIVE because we can't
	 * make sure that they are going out after the admin connect,
	 * controller enable and/or other commands in the initialization
	 * sequence. until the controller will be LIVE, fail with
	 * BLK_STS_RESOURCE so that they will be rescheduled.
	 */
	if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
		return false;

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		/*
		 * Only allow commands on a live queue, except for the connect
		 * command, which is require to set the queue live in the
		 * appropinquate states.
		 */
		switch (ctrl->state) {
		case NVME_CTRL_CONNECTING:
			if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
			    req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
				return true;
			break;
		default:
			break;
		case NVME_CTRL_DEAD:
			return false;
		}
	}

	return queue_live;
}
EXPORT_SYMBOL_GPL(__nvme_check_ready);

M
Ming Lin 已提交
719 720 721
static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
722
	memset(cmnd, 0, sizeof(*cmnd));
M
Ming Lin 已提交
723
	cmnd->common.opcode = nvme_cmd_flush;
C
Christoph Hellwig 已提交
724
	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
M
Ming Lin 已提交
725 726
}

727
static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
M
Ming Lin 已提交
728 729
		struct nvme_command *cmnd)
{
730
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
M
Ming Lin 已提交
731
	struct nvme_dsm_range *range;
732
	struct bio *bio;
M
Ming Lin 已提交
733

734 735 736 737 738 739 740 741
	/*
	 * Some devices do not consider the DSM 'Number of Ranges' field when
	 * determining how much data to DMA. Always allocate memory for maximum
	 * number of segments to prevent device reading beyond end of buffer.
	 */
	static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;

	range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
742 743 744 745 746 747 748 749 750 751 752
	if (!range) {
		/*
		 * If we fail allocation our range, fallback to the controller
		 * discard page. If that's also busy, it's safe to return
		 * busy, as we know we can make progress once that's freed.
		 */
		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
			return BLK_STS_RESOURCE;

		range = page_address(ns->ctrl->discard_page);
	}
M
Ming Lin 已提交
753

754
	__rq_for_each_bio(bio, req) {
755
		u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
756 757
		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

K
Keith Busch 已提交
758 759 760 761 762
		if (n < segments) {
			range[n].cattr = cpu_to_le32(0);
			range[n].nlb = cpu_to_le32(nlb);
			range[n].slba = cpu_to_le64(slba);
		}
763 764 765 766
		n++;
	}

	if (WARN_ON_ONCE(n != segments)) {
767 768 769 770
		if (virt_to_page(range) == ns->ctrl->discard_page)
			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
		else
			kfree(range);
771
		return BLK_STS_IOERR;
772
	}
M
Ming Lin 已提交
773

774
	memset(cmnd, 0, sizeof(*cmnd));
M
Ming Lin 已提交
775
	cmnd->dsm.opcode = nvme_cmd_dsm;
C
Christoph Hellwig 已提交
776
	cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
777
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
M
Ming Lin 已提交
778 779
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

780 781
	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
782
	req->special_vec.bv_len = alloc_size;
783
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;
M
Ming Lin 已提交
784

785
	return BLK_STS_OK;
M
Ming Lin 已提交
786 787
}

788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811
static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
			      struct request *req)
{
	u32 upper, lower;
	u64 ref48;

	/* both rw and write zeroes share the same reftag format */
	switch (ns->guard_type) {
	case NVME_NVM_NS_16B_GUARD:
		cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
		break;
	case NVME_NVM_NS_64B_GUARD:
		ref48 = ext_pi_ref_tag(req);
		lower = lower_32_bits(ref48);
		upper = upper_32_bits(ref48);

		cmnd->rw.reftag = cpu_to_le32(lower);
		cmnd->rw.cdw3 = cpu_to_le32(upper);
		break;
	default:
		break;
	}
}

812 813 814
static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
815 816
	memset(cmnd, 0, sizeof(*cmnd));

817 818 819 820 821 822
	if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		return nvme_setup_discard(ns, req, cmnd);

	cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
	cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->write_zeroes.slba =
823
		cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
824 825
	cmnd->write_zeroes.length =
		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
K
Klaus Jensen 已提交
826 827

	if (nvme_ns_has_pi(ns)) {
828
		cmnd->write_zeroes.control = cpu_to_le16(NVME_RW_PRINFO_PRACT);
K
Klaus Jensen 已提交
829 830 831 832

		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
833
			nvme_set_ref_tag(ns, cmnd, req);
K
Klaus Jensen 已提交
834 835 836 837
			break;
		}
	}

838 839 840
	return BLK_STS_OK;
}

841
static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
K
Keith Busch 已提交
842 843
		struct request *req, struct nvme_command *cmnd,
		enum nvme_opcode op)
M
Ming Lin 已提交
844 845 846 847 848 849 850 851 852 853 854 855
{
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

K
Keith Busch 已提交
856
	cmnd->rw.opcode = op;
857
	cmnd->rw.flags = 0;
C
Christoph Hellwig 已提交
858
	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
859 860
	cmnd->rw.cdw2 = 0;
	cmnd->rw.cdw3 = 0;
861
	cmnd->rw.metadata = 0;
862
	cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
M
Ming Lin 已提交
863
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
864 865 866
	cmnd->rw.reftag = 0;
	cmnd->rw.apptag = 0;
	cmnd->rw.appmask = 0;
M
Ming Lin 已提交
867 868

	if (ns->ms) {
869 870 871 872 873 874 875 876 877 878 879 880
		/*
		 * If formated with metadata, the block layer always provides a
		 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.  Else
		 * we enable the PRACT bit for protection information or set the
		 * namespace capacity to zero to prevent any I/O.
		 */
		if (!blk_integrity_rq(req)) {
			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
				return BLK_STS_NOTSUPP;
			control |= NVME_RW_PRINFO_PRACT;
		}

M
Ming Lin 已提交
881 882 883 884 885 886 887 888
		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
K
Keith Busch 已提交
889 890
			if (op == nvme_cmd_zone_append)
				control |= NVME_RW_APPEND_PIREMAP;
891
			nvme_set_ref_tag(ns, cmnd, req);
M
Ming Lin 已提交
892 893 894 895 896 897
			break;
		}
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
898
	return 0;
M
Ming Lin 已提交
899 900
}

901 902 903
void nvme_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
M
Minwoo Im 已提交
904
		struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
905

C
Christoph Hellwig 已提交
906
		if (req->special_vec.bv_page == ctrl->discard_page)
M
Minwoo Im 已提交
907
			clear_bit_unlock(0, &ctrl->discard_page_busy);
908
		else
C
Christoph Hellwig 已提交
909
			kfree(bvec_virt(&req->special_vec));
910 911 912 913
	}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);

914
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
M
Ming Lin 已提交
915
{
916
	struct nvme_command *cmd = nvme_req(req)->cmd;
917
	blk_status_t ret = BLK_STS_OK;
M
Ming Lin 已提交
918

919
	if (!(req->rq_flags & RQF_DONTPREP))
920
		nvme_clear_nvme_request(req);
921

922 923 924
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
925
		/* these are setup prior to execution in nvme_init_request() */
926 927
		break;
	case REQ_OP_FLUSH:
M
Ming Lin 已提交
928
		nvme_setup_flush(ns, cmd);
929
		break;
K
Keith Busch 已提交
930 931 932 933 934 935 936 937 938 939 940 941 942
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_RESET:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
		break;
943
	case REQ_OP_WRITE_ZEROES:
944 945
		ret = nvme_setup_write_zeroes(ns, req, cmd);
		break;
946
	case REQ_OP_DISCARD:
M
Ming Lin 已提交
947
		ret = nvme_setup_discard(ns, req, cmd);
948 949
		break;
	case REQ_OP_READ:
K
Keith Busch 已提交
950 951
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
		break;
952
	case REQ_OP_WRITE:
K
Keith Busch 已提交
953 954 955 956
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write);
		break;
	case REQ_OP_ZONE_APPEND:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
957 958 959
		break;
	default:
		WARN_ON_ONCE(1);
960
		return BLK_STS_IOERR;
961
	}
M
Ming Lin 已提交
962

963
	cmd->common.command_id = nvme_cid(req);
K
Keith Busch 已提交
964
	trace_nvme_setup_cmd(req, cmd);
M
Ming Lin 已提交
965 966 967 968
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

969 970 971 972 973 974
/*
 * Return values:
 * 0:  success
 * >0: nvme controller's cqe status response
 * <0: kernel error in lieu of controller response
 */
975
static int nvme_execute_rq(struct request *rq, bool at_head)
976 977 978
{
	blk_status_t status;

979
	status = blk_execute_rq(rq, at_head);
980 981 982 983 984 985 986
	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		return -EINTR;
	if (nvme_req(rq)->status)
		return nvme_req(rq)->status;
	return blk_status_to_errno(status);
}

987 988 989 990 991
/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
992
		union nvme_result *result, void *buffer, unsigned bufflen,
993
		unsigned timeout, int qid, int at_head,
994
		blk_mq_req_flags_t flags)
995 996 997 998
{
	struct request *req;
	int ret;

999
	if (qid == NVME_QID_ANY)
1000
		req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
1001
	else
1002 1003 1004
		req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
						qid ? qid - 1 : 0);

1005 1006
	if (IS_ERR(req))
		return PTR_ERR(req);
1007
	nvme_init_request(req, cmd);
1008

1009 1010
	if (timeout)
		req->timeout = timeout;
1011

1012 1013 1014 1015
	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
1016 1017
	}

1018
	req->rq_flags |= RQF_QUIET;
1019
	ret = nvme_execute_rq(req, at_head);
1020
	if (result && ret >= 0)
1021
		*result = nvme_req(req)->result;
1022 1023 1024 1025
 out:
	blk_mq_free_request(req);
	return ret;
}
1026
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
1027 1028 1029 1030

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
1031
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
1032
			NVME_QID_ANY, 0, 0);
1033
}
1034
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
1035

1036 1037 1038 1039
static u32 nvme_known_admin_effects(u8 opcode)
{
	switch (opcode) {
	case nvme_admin_format_nvm:
1040
		return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_NCC |
1041 1042
			NVME_CMD_EFFECTS_CSE_MASK;
	case nvme_admin_sanitize_nvm:
1043
		return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK;
1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057
	default:
		break;
	}
	return 0;
}

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
{
	u32 effects = 0;

	if (ns) {
		if (ns->head->effects)
			effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
1058 1059 1060
			dev_warn_once(ctrl->device,
				"IO command:%02x has unhandled effects:%08x\n",
				opcode, effects);
1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080
		return 0;
	}

	if (ctrl->effects)
		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
	effects |= nvme_known_admin_effects(opcode);

	return effects;
}
EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU);

static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			       u8 opcode)
{
	u32 effects = nvme_command_effects(ctrl, ns, opcode);

	/*
	 * For simplicity, IO to all namespaces is quiesced even if the command
	 * effects say only one namespace is affected.
	 */
1081
	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
1082 1083 1084 1085 1086 1087 1088 1089 1090 1091
		mutex_lock(&ctrl->scan_lock);
		mutex_lock(&ctrl->subsys->lock);
		nvme_mpath_start_freeze(ctrl->subsys);
		nvme_mpath_wait_freeze(ctrl->subsys);
		nvme_start_freeze(ctrl);
		nvme_wait_freeze(ctrl);
	}
	return effects;
}

1092 1093
static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
			      struct nvme_command *cmd, int status)
1094
{
1095
	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
1096 1097 1098 1099 1100 1101 1102
		nvme_unfreeze(ctrl);
		nvme_mpath_unfreeze(ctrl->subsys);
		mutex_unlock(&ctrl->subsys->lock);
		nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
		mutex_unlock(&ctrl->scan_lock);
	}
	if (effects & NVME_CMD_EFFECTS_CCC)
1103
		nvme_init_ctrl_finish(ctrl);
1104 1105 1106 1107
	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
		nvme_queue_scan(ctrl);
		flush_work(&ctrl->scan_work);
	}
1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127

	switch (cmd->common.opcode) {
	case nvme_admin_set_features:
		switch (le32_to_cpu(cmd->common.cdw10) & 0xFF) {
		case NVME_FEAT_KATO:
			/*
			 * Keep alive commands interval on the host should be
			 * updated when KATO is modified by Set Features
			 * commands.
			 */
			if (!status)
				nvme_update_keep_alive(ctrl, cmd);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
1128 1129
}

1130
int nvme_execute_passthru_rq(struct request *rq)
1131 1132 1133 1134 1135
{
	struct nvme_command *cmd = nvme_req(rq)->cmd;
	struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
	struct nvme_ns *ns = rq->q->queuedata;
	u32 effects;
1136
	int  ret;
1137 1138

	effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
1139
	ret = nvme_execute_rq(rq, false);
1140
	if (effects) /* nothing to be done for zero cmd effects */
1141
		nvme_passthru_end(ctrl, effects, cmd, ret);
1142 1143

	return ret;
1144 1145 1146
}
EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);

H
Hannes Reinecke 已提交
1147 1148 1149 1150 1151 1152 1153
/*
 * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1:
 * 
 *   The host should send Keep Alive commands at half of the Keep Alive Timeout
 *   accounting for transport roundtrip times [..].
 */
static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
1154
{
H
Hannes Reinecke 已提交
1155
	queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2);
1156 1157
}

1158
static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
S
Sagi Grimberg 已提交
1159 1160
{
	struct nvme_ctrl *ctrl = rq->end_io_data;
1161 1162
	unsigned long flags;
	bool startka = false;
S
Sagi Grimberg 已提交
1163 1164 1165

	blk_mq_free_request(rq);

1166
	if (status) {
S
Sagi Grimberg 已提交
1167
		dev_err(ctrl->device,
1168 1169
			"failed nvme_keep_alive_end_io error=%d\n",
				status);
S
Sagi Grimberg 已提交
1170 1171 1172
		return;
	}

1173
	ctrl->comp_seen = false;
1174 1175 1176 1177 1178 1179
	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->state == NVME_CTRL_LIVE ||
	    ctrl->state == NVME_CTRL_CONNECTING)
		startka = true;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (startka)
H
Hannes Reinecke 已提交
1180
		nvme_queue_keep_alive_work(ctrl);
S
Sagi Grimberg 已提交
1181 1182 1183 1184 1185 1186
}

static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);
1187
	bool comp_seen = ctrl->comp_seen;
1188
	struct request *rq;
1189 1190 1191 1192 1193

	if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
		dev_dbg(ctrl->device,
			"reschedule traffic based keep-alive timer\n");
		ctrl->comp_seen = false;
H
Hannes Reinecke 已提交
1194
		nvme_queue_keep_alive_work(ctrl);
1195 1196
		return;
	}
S
Sagi Grimberg 已提交
1197

1198 1199
	rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd),
				  BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
1200
	if (IS_ERR(rq)) {
S
Sagi Grimberg 已提交
1201
		/* allocation failure, reset the controller */
1202
		dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
1203
		nvme_reset_ctrl(ctrl);
S
Sagi Grimberg 已提交
1204 1205
		return;
	}
1206
	nvme_init_request(rq, &ctrl->ka_cmd);
1207 1208

	rq->timeout = ctrl->kato * HZ;
1209
	rq->end_io = nvme_keep_alive_end_io;
1210
	rq->end_io_data = ctrl;
1211
	rq->rq_flags |= RQF_QUIET;
1212
	blk_execute_rq_nowait(rq, false);
S
Sagi Grimberg 已提交
1213 1214
}

1215
static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
S
Sagi Grimberg 已提交
1216 1217 1218 1219
{
	if (unlikely(ctrl->kato == 0))
		return;

H
Hannes Reinecke 已提交
1220
	nvme_queue_keep_alive_work(ctrl);
S
Sagi Grimberg 已提交
1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231
}

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);

1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246
static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
				   struct nvme_command *cmd)
{
	unsigned int new_kato =
		DIV_ROUND_UP(le32_to_cpu(cmd->common.cdw11), 1000);

	dev_info(ctrl->device,
		 "keep alive interval updated from %u ms to %u ms\n",
		 ctrl->kato * 1000 / 2, new_kato * 1000 / 2);

	nvme_stop_keep_alive(ctrl);
	ctrl->kato = new_kato;
	nvme_start_keep_alive(ctrl);
}

1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259
/*
 * In NVMe 1.0 the CNS field was just a binary controller or namespace
 * flag, thus sending any new CNS opcodes has a big chance of not working.
 * Qemu unfortunately had that bug after reporting a 1.1 version compliance
 * (but not for any later version).
 */
static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
{
	if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
		return ctrl->vs < NVME_VS(1, 2, 0);
	return ctrl->vs < NVME_VS(1, 1, 0);
}

K
Keith Busch 已提交
1260
static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
1261 1262 1263 1264 1265 1266
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
1267
	c.identify.cns = NVME_ID_CNS_CTRL;
1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}

1280
static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
1281
		struct nvme_ns_id_desc *cur, bool *csi_seen)
1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292
{
	const char *warn_str = "ctrl returned bogus length:";
	void *data = cur;

	switch (cur->nidt) {
	case NVME_NIDT_EUI64:
		if (cur->nidl != NVME_NIDT_EUI64_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
				 warn_str, cur->nidl);
			return -1;
		}
1293 1294
		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
			return NVME_NIDT_EUI64_LEN;
1295 1296 1297 1298 1299 1300 1301 1302
		memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
		return NVME_NIDT_EUI64_LEN;
	case NVME_NIDT_NGUID:
		if (cur->nidl != NVME_NIDT_NGUID_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
				 warn_str, cur->nidl);
			return -1;
		}
1303 1304
		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
			return NVME_NIDT_NGUID_LEN;
1305 1306 1307 1308 1309 1310 1311 1312
		memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
		return NVME_NIDT_NGUID_LEN;
	case NVME_NIDT_UUID:
		if (cur->nidl != NVME_NIDT_UUID_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
				 warn_str, cur->nidl);
			return -1;
		}
1313 1314
		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
			return NVME_NIDT_UUID_LEN;
1315 1316
		uuid_copy(&ids->uuid, data + sizeof(*cur));
		return NVME_NIDT_UUID_LEN;
1317 1318 1319 1320 1321 1322 1323 1324 1325
	case NVME_NIDT_CSI:
		if (cur->nidl != NVME_NIDT_CSI_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n",
				 warn_str, cur->nidl);
			return -1;
		}
		memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN);
		*csi_seen = true;
		return NVME_NIDT_CSI_LEN;
1326 1327 1328 1329 1330 1331
	default:
		/* Skip unknown types */
		return cur->nidl;
	}
}

1332
static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
1333
		struct nvme_ns_ids *ids)
1334 1335
{
	struct nvme_command c = { };
1336 1337
	bool csi_seen = false;
	int status, pos, len;
1338 1339
	void *data;

1340 1341
	if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl))
		return 0;
1342 1343 1344
	if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
		return 0;

1345 1346 1347 1348 1349 1350 1351 1352
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

1353
	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
1354
				      NVME_IDENTIFY_DATA_SIZE);
1355 1356
	if (status) {
		dev_warn(ctrl->device,
1357 1358
			"Identify Descriptors failed (nsid=%u, status=0x%x)\n",
			nsid, status);
1359
		goto free_data;
1360
	}
1361 1362 1363 1364 1365 1366 1367

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;

1368
		len = nvme_process_ns_desc(ctrl, ids, cur, &csi_seen);
1369
		if (len < 0)
1370
			break;
1371 1372 1373

		len += sizeof(*cur);
	}
1374 1375 1376 1377 1378 1379 1380

	if (nvme_multi_css(ctrl) && !csi_seen) {
		dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
			 nsid);
		status = -EINVAL;
	}

1381 1382 1383 1384 1385
free_data:
	kfree(data);
	return status;
}

1386 1387
static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
			struct nvme_ns_ids *ids, struct nvme_id_ns **id)
1388 1389 1390 1391 1392
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1393 1394
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
1395
	c.identify.cns = NVME_ID_CNS_NS;
1396

1397 1398 1399
	*id = kmalloc(sizeof(**id), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;
1400

1401
	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
1402
	if (error) {
1403
		dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
1404
		goto out_free_id;
1405 1406
	}

1407
	error = NVME_SC_INVALID_NS | NVME_SC_DNR;
1408 1409
	if ((*id)->ncap == 0) /* namespace not allocated or attached */
		goto out_free_id;
1410

1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422

	if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
		dev_info(ctrl->device,
			 "Ignoring bogus Namespace Identifiers\n");
	} else {
		if (ctrl->vs >= NVME_VS(1, 1, 0) &&
		    !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
			memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
		if (ctrl->vs >= NVME_VS(1, 2, 0) &&
		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
	}
1423

1424 1425 1426 1427
	return 0;

out_free_id:
	kfree(*id);
1428
	return error;
1429 1430
}

1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456
static int nvme_identify_ns_cs_indep(struct nvme_ctrl *ctrl, unsigned nsid,
			struct nvme_id_ns_cs_indep **id)
{
	struct nvme_command c = {
		.identify.opcode	= nvme_admin_identify,
		.identify.nsid		= cpu_to_le32(nsid),
		.identify.cns		= NVME_ID_CNS_NS_CS_INDEP,
	};
	int ret;

	*id = kmalloc(sizeof(**id), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
	if (ret) {
		dev_warn(ctrl->device,
			 "Identify namespace (CS independent) failed (%d)\n",
			 ret);
		kfree(*id);
		return ret;
	}

	return 0;
}

K
Keith Busch 已提交
1457 1458
static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen, u32 *result)
1459
{
1460
	union nvme_result res = { 0 };
1461
	struct nvme_command c = { };
1462
	int ret;
1463

K
Keith Busch 已提交
1464
	c.features.opcode = op;
1465 1466 1467
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

1468
	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
1469
			buffer, buflen, 0, NVME_QID_ANY, 0, 0);
1470
	if (ret >= 0 && result)
1471
		*result = le32_to_cpu(res.u32);
1472
	return ret;
1473 1474
}

K
Keith Busch 已提交
1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_set_features);

int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_get_features);

C
Christoph Hellwig 已提交
1493 1494 1495 1496 1497 1498
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

1499
	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
C
Christoph Hellwig 已提交
1500
			&result);
1501
	if (status < 0)
C
Christoph Hellwig 已提交
1502 1503
		return status;

1504 1505 1506 1507 1508 1509
	/*
	 * Degraded controllers might return an error when setting the queue
	 * count.  We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be only way to fix them up.
	 */
	if (status > 0) {
1510
		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
1511 1512 1513 1514 1515 1516
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

C
Christoph Hellwig 已提交
1517 1518
	return 0;
}
1519
EXPORT_SYMBOL_GPL(nvme_set_queue_count);
C
Christoph Hellwig 已提交
1520

1521
#define NVME_AEN_SUPPORTED \
1522 1523
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
	 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)
1524 1525 1526

static void nvme_enable_aen(struct nvme_ctrl *ctrl)
{
1527
	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
1528 1529
	int status;

1530 1531 1532 1533 1534
	if (!supported_aens)
		return;

	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
			NULL, 0, &result);
1535 1536
	if (status)
		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
1537
			 supported_aens);
1538 1539

	queue_work(nvme_wq, &ctrl->async_event_work);
1540 1541
}

1542
static int nvme_ns_open(struct nvme_ns *ns)
1543 1544
{

1545
	/* should never be called due to GENHD_FL_HIDDEN */
1546
	if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head)))
1547
		goto fail;
K
Kanchan Joshi 已提交
1548
	if (!nvme_get_ns(ns))
1549 1550 1551 1552
		goto fail;
	if (!try_module_get(ns->ctrl->ops->module))
		goto fail_put_ns;

C
Christoph Hellwig 已提交
1553
	return 0;
1554 1555 1556 1557 1558

fail_put_ns:
	nvme_put_ns(ns);
fail:
	return -ENXIO;
1559 1560
}

1561
static void nvme_ns_release(struct nvme_ns *ns)
1562
{
1563 1564 1565

	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
1566 1567
}

1568 1569 1570 1571 1572 1573 1574 1575 1576 1577
static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	return nvme_ns_open(bdev->bd_disk->private_data);
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	nvme_ns_release(disk->private_data);
}

1578
int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1579 1580 1581 1582 1583 1584 1585 1586 1587
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
1588
static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
1589
				u32 max_integrity_segments)
1590
{
1591
	struct blk_integrity integrity = { };
1592

1593
	switch (ns->pi_type) {
1594
	case NVME_NS_DPS_PI_TYPE3:
1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609
		switch (ns->guard_type) {
		case NVME_NVM_NS_16B_GUARD:
			integrity.profile = &t10_pi_type3_crc;
			integrity.tag_size = sizeof(u16) + sizeof(u32);
			integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
			break;
		case NVME_NVM_NS_64B_GUARD:
			integrity.profile = &ext_pi_type3_crc64;
			integrity.tag_size = sizeof(u16) + 6;
			integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
			break;
		default:
			integrity.profile = NULL;
			break;
		}
1610 1611 1612
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627
		switch (ns->guard_type) {
		case NVME_NVM_NS_16B_GUARD:
			integrity.profile = &t10_pi_type1_crc;
			integrity.tag_size = sizeof(u16);
			integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
			break;
		case NVME_NVM_NS_64B_GUARD:
			integrity.profile = &ext_pi_type1_crc64;
			integrity.tag_size = sizeof(u16);
			integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
			break;
		default:
			integrity.profile = NULL;
			break;
		}
1628 1629 1630 1631 1632
		break;
	default:
		integrity.profile = NULL;
		break;
	}
1633 1634

	integrity.tuple_size = ns->ms;
1635
	blk_integrity_register(disk, &integrity);
1636
	blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
1637 1638
}
#else
1639
static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
1640
				u32 max_integrity_segments)
1641 1642 1643 1644
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

1645
static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
1646
{
1647
	struct nvme_ctrl *ctrl = ns->ctrl;
1648
	struct request_queue *queue = disk->queue;
1649 1650
	u32 size = queue_logical_block_size(queue);

1651
	if (ctrl->max_discard_sectors == 0) {
1652
		blk_queue_max_discard_sectors(queue, 0);
1653 1654 1655
		return;
	}

1656 1657 1658
	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
			NVME_DSM_MAX_RANGES);

1659
	queue->limits.discard_granularity = size;
1660

1661
	/* If discard is already enabled, don't reset queue limits */
1662
	if (queue->limits.max_discard_sectors)
1663 1664
		return;

T
Tom Yan 已提交
1665 1666 1667
	if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
		ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);

1668 1669
	blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
	blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);
1670 1671

	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
1672
		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
1673 1674
}

1675 1676 1677 1678
static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{
	return uuid_equal(&a->uuid, &b->uuid) &&
		memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
1679 1680
		memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 &&
		a->csi == b->csi;
1681 1682
}

1683
static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id)
1684
{
1685 1686
	bool first = id->dps & NVME_NS_DPS_PI_FIRST;
	unsigned lbaf = nvme_lbaf_index(id->flbas);
1687
	struct nvme_ctrl *ctrl = ns->ctrl;
1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699
	struct nvme_command c = { };
	struct nvme_id_ns_nvm *nvm;
	int ret = 0;
	u32 elbaf;

	ns->pi_size = 0;
	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
	if (!(ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) {
		ns->pi_size = sizeof(struct t10_pi_tuple);
		ns->guard_type = NVME_NVM_NS_16B_GUARD;
		goto set_pi;
	}
1700

1701 1702 1703
	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
	if (!nvm)
		return -ENOMEM;
1704

1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(ns->head->ns_id);
	c.identify.cns = NVME_ID_CNS_CS_NS;
	c.identify.csi = NVME_CSI_NVM;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, nvm, sizeof(*nvm));
	if (ret)
		goto free_data;

	elbaf = le32_to_cpu(nvm->elbaf[lbaf]);

	/* no support for storage tag formats right now */
	if (nvme_elbaf_sts(elbaf))
		goto free_data;

	ns->guard_type = nvme_elbaf_guard_type(elbaf);
	switch (ns->guard_type) {
	case NVME_NVM_NS_64B_GUARD:
		ns->pi_size = sizeof(struct crc64_pi_tuple);
		break;
	case NVME_NVM_NS_16B_GUARD:
		ns->pi_size = sizeof(struct t10_pi_tuple);
		break;
	default:
		break;
	}

free_data:
	kfree(nvm);
set_pi:
	if (ns->pi_size && (first || ns->ms == ns->pi_size))
1736 1737 1738 1739
		ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
	else
		ns->pi_type = 0;

1740 1741 1742 1743 1744 1745 1746 1747 1748 1749
	return ret;
}

static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	struct nvme_ctrl *ctrl = ns->ctrl;

	if (nvme_init_ms(ns, id))
		return;

1750 1751
	ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
	if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
1752 1753
		return;

1754 1755 1756 1757 1758 1759 1760
	if (ctrl->ops->flags & NVME_F_FABRICS) {
		/*
		 * The NVMe over Fabrics specification only supports metadata as
		 * part of the extended data LBA.  We rely on HCA/HBA support to
		 * remap the separate metadata buffer from the block layer.
		 */
		if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
1761
			return;
1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775

		ns->features |= NVME_NS_EXT_LBAS;

		/*
		 * The current fabrics transport drivers support namespace
		 * metadata formats only if nvme_ns_has_pi() returns true.
		 * Suppress support for all other formats so the namespace will
		 * have a 0 capacity and not be usable through the block stack.
		 *
		 * Note, this check will need to be modified if any drivers
		 * gain the ability to use other metadata formats.
		 */
		if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns))
			ns->features |= NVME_NS_METADATA_SUPPORTED;
1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789
	} else {
		/*
		 * For PCIe controllers, we can't easily remap the separate
		 * metadata buffer from the block layer and thus require a
		 * separate metadata buffer for block layer metadata/PI support.
		 * We allow extended LBAs for the passthrough interface, though.
		 */
		if (id->flbas & NVME_NS_FLBAS_META_EXT)
			ns->features |= NVME_NS_EXT_LBAS;
		else
			ns->features |= NVME_NS_METADATA_SUPPORTED;
	}
}

static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
		struct request_queue *q)
{
	bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT;

	if (ctrl->max_hw_sectors) {
		u32 max_segments =
			(ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;

		max_segments = min_not_zero(max_segments, ctrl->max_segments);
		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
	}
	blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
	blk_queue_dma_alignment(q, 3);
	blk_queue_write_cache(q, vwc, vwc);
}

static void nvme_update_disk_info(struct gendisk *disk,
		struct nvme_ns *ns, struct nvme_id_ns *id)
{
	sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
	unsigned short bs = 1 << ns->lba_shift;
	u32 atomic_bs, phys_bs, io_opt = 0;

	/*
	 * The block layer can't support LBA sizes larger than the page size
	 * yet, so catch this early and don't allow block I/O.
	 */
	if (ns->lba_shift > PAGE_SHIFT) {
		capacity = 0;
		bs = (1 << 9);
	}

	blk_integrity_unregister(disk);

	atomic_bs = phys_bs = bs;
	if (id->nabo == 0) {
		/*
		 * Bit 1 indicates whether NAWUPF is defined for this namespace
		 * and whether it should be used instead of AWUPF. If NAWUPF ==
		 * 0 then AWUPF must be used instead.
		 */
		if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
			atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
		else
			atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
	}

	if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
		/* NPWG = Namespace Preferred Write Granularity */
		phys_bs = bs * (1 + le16_to_cpu(id->npwg));
		/* NOWS = Namespace Optimal Write Size */
		io_opt = bs * (1 + le16_to_cpu(id->nows));
	}

	blk_queue_logical_block_size(disk->queue, bs);
	/*
	 * Linux filesystems assume writing a single physical block is
	 * an atomic operation. Hence limit the physical block size to the
	 * value of the Atomic Write Unit Power Fail parameter.
	 */
	blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
	blk_queue_io_min(disk->queue, phys_bs);
	blk_queue_io_opt(disk->queue, io_opt);
	/*
	 * Register a metadata profile for PI, or the plain non-integrity NVMe
	 * metadata masquerading as Type 0 if supported, otherwise reject block
	 * I/O to namespaces with metadata except when the namespace supports
	 * PI, as it can strip/insert in that case.
	 */
	if (ns->ms) {
		if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
		    (ns->features & NVME_NS_METADATA_SUPPORTED))
			nvme_init_integrity(disk, ns,
					    ns->ctrl->max_integrity_segments);
		else if (!nvme_ns_has_pi(ns))
			capacity = 0;
	}

	set_capacity_and_notify(disk, capacity);

	nvme_config_discard(disk, ns);
	blk_queue_max_write_zeroes_sectors(disk->queue,
					   ns->ctrl->max_zeroes_sectors);
}

static inline bool nvme_first_scan(struct gendisk *disk)
{
	/* nvme_alloc_ns() scans the disk prior to adding it */
	return !disk_live(disk);
}

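/*
 * Expose the controller's I/O boundary (NOIOB, or max_hw_sectors for
 * controllers with the STRIPE_SIZE quirk) to the block layer as the chunk
 * size, so that requests are split rather than straddling the boundary.
 */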
static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u32 iob;

	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
	    is_power_of_2(ctrl->max_hw_sectors))
		iob = ctrl->max_hw_sectors;
	else
		iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));

	if (!iob)
		return;

	if (!is_power_of_2(iob)) {
		if (nvme_first_scan(ns->disk))
			pr_warn("%s: ignoring unaligned IO boundary:%u\n",
				ns->disk->disk_name, iob);
		return;
	}

	if (blk_queue_is_zoned(ns->disk->queue)) {
		if (nvme_first_scan(ns->disk))
			pr_warn("%s: ignoring zoned namespace IO boundary\n",
				ns->disk->disk_name);
		return;
	}

	blk_queue_chunk_sectors(ns->queue, iob);
}

static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	unsigned lbaf = nvme_lbaf_index(id->flbas);
	int ret;

	blk_mq_freeze_queue(ns->disk->queue);
	ns->lba_shift = id->lbaf[lbaf].ds;
	nvme_set_queue_limits(ns->ctrl, ns->queue);

	nvme_configure_metadata(ns, id);
	nvme_set_chunk_sectors(ns, id);
	nvme_update_disk_info(ns->disk, ns, id);

	if (ns->head->ids.csi == NVME_CSI_ZNS) {
		ret = nvme_update_zone_info(ns, lbaf);
		if (ret)
			goto out_unfreeze;
	}

	set_disk_ro(ns->disk, (id->nsattr & NVME_NS_ATTR_RO) ||
		test_bit(NVME_NS_FORCE_RO, &ns->flags));
	set_bit(NVME_NS_READY, &ns->flags);
	blk_mq_unfreeze_queue(ns->disk->queue);

	if (blk_queue_is_zoned(ns->queue)) {
		ret = nvme_revalidate_zones(ns);
		if (ret && !nvme_first_scan(ns->disk))
			return ret;
	}

	if (nvme_ns_head_multipath(ns->head)) {
		blk_mq_freeze_queue(ns->head->disk->queue);
		nvme_update_disk_info(ns->head->disk, ns, id);
		set_disk_ro(ns->head->disk,
			    (id->nsattr & NVME_NS_ATTR_RO) ||
				    test_bit(NVME_NS_FORCE_RO, &ns->flags));
		nvme_mpath_revalidate_paths(ns);
		blk_stack_limits(&ns->head->disk->queue->limits,
				 &ns->queue->limits, 0);
		disk_update_readahead(ns->head->disk);
		blk_mq_unfreeze_queue(ns->head->disk->queue);
	}
	return 0;

out_unfreeze:
	/*
	 * If probing fails due to an unsupported feature, hide the block
	 * device, but still allow other access.
	 */
	if (ret == -ENODEV) {
		ns->disk->flags |= GENHD_FL_HIDDEN;
		set_bit(NVME_NS_READY, &ns->flags);
		ret = 0;
	}
	blk_mq_unfreeze_queue(ns->disk->queue);
	return ret;
}

static char nvme_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 1;
	case PR_EXCLUSIVE_ACCESS:
		return 2;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 3;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 4;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 5;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 6;
	default:
		return 0;
	}
}

static int nvme_send_ns_head_pr_command(struct block_device *bdev,
		struct nvme_command *c, u8 data[16])
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;
	int srcu_idx = srcu_read_lock(&head->srcu);
	struct nvme_ns *ns = nvme_find_path(head);
	int ret = -EWOULDBLOCK;

	if (ns) {
		c->common.nsid = cpu_to_le32(ns->head->ns_id);
		ret = nvme_submit_sync_cmd(ns->queue, c, data, 16);
	}
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}
	
static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
		u8 data[16])
{
	c->common.nsid = cpu_to_le32(ns->head->ns_id);
	return nvme_submit_sync_cmd(ns->queue, c, data, 16);
}

static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
	struct nvme_command c = { };
	u8 data[16] = { 0, };

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	c.common.opcode = op;
	c.common.cdw10 = cpu_to_le32(cdw10);

	if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
	    bdev->bd_disk->fops == &nvme_ns_head_ops)
		return nvme_send_ns_head_pr_command(bdev, &c, data);
	return nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c, data);
}

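/*
 * Layout of cdw10 as used by the persistent reservation helpers below, per
 * the NVMe Reservation Register/Acquire/Release commands: bits 2:0 select
 * the action, bit 3 is the "ignore existing key" flag, bits 15:8 carry the
 * reservation type and, for Register, bits 31:30 the persist-through-power-
 * loss setting.
 */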
static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}

static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}

static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);

	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}

static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	u32 cdw10 = 1 | (key ? 1 << 3 : 0);

	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
}

static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);

	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};

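/*
 * Security Send/Receive carry the security protocol in bits 31:24 of cdw10
 * and the protocol specific field in bits 23:8, with the transfer length in
 * cdw11; this callback is what the SED-Opal core uses to talk to the drive.
 */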
#ifdef CONFIG_BLK_SED_OPAL
int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send)
{
	struct nvme_ctrl *ctrl = data;
	struct nvme_command cmd = { };

	if (send)
		cmd.common.opcode = nvme_admin_security_send;
	else
		cmd.common.opcode = nvme_admin_security_recv;
	cmd.common.nsid = 0;
	cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
	cmd.common.cdw11 = cpu_to_le32(len);

	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 0,
			NVME_QID_ANY, 1, 0);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */

#ifdef CONFIG_BLK_DEV_ZONED
static int nvme_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data)
{
	return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb,
			data);
}
#else
#define nvme_report_zones	NULL
#endif /* CONFIG_BLK_DEV_ZONED */

static const struct block_device_operations nvme_bdev_ops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
	.report_zones	= nvme_report_zones,
	.pr_ops		= &nvme_pr_ops,
};

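/*
 * The timeout passed in here is in units of 500 milliseconds (it is derived
 * from CAP.TO or, on controllers that report CRMS, from CRTO), hence the
 * "(timeout + 1) * HZ / 2" conversion to jiffies below.
 */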
static int nvme_wait_ready(struct nvme_ctrl *ctrl, u32 timeout, bool enabled)
{
	unsigned long timeout_jiffies = ((timeout + 1) * HZ / 2) + jiffies;
	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
	int ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if (csts == ~0)
			return -ENODEV;
		if ((csts & NVME_CSTS_RDY) == bit)
			break;

		usleep_range(1000, 2000);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout_jiffies)) {
			dev_err(ctrl->device,
				"Device not ready; aborting %s, CSTS=0x%x\n",
				enabled ? "initialisation" : "reset", csts);
			return -ENODEV;
		}
	}

	return ret;
}

/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
int nvme_disable_ctrl(struct nvme_ctrl *ctrl)
{
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config &= ~NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
		msleep(NVME_QUIRK_DELAY_AMOUNT);

	return nvme_wait_ready(ctrl, NVME_CAP_TIMEOUT(ctrl->cap), false);
}
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);

int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
{
	unsigned dev_page_min;
	u32 timeout;
	int ret;

	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
	if (ret) {
		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
		return ret;
	}
	dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;

	if (NVME_CTRL_PAGE_SHIFT < dev_page_min) {
		dev_err(ctrl->device,
			"Minimum device page size %u too large for host (%u)\n",
			1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT);
		return -ENODEV;
	}

	if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI)
		ctrl->ctrl_config = NVME_CC_CSS_CSI;
	else
		ctrl->ctrl_config = NVME_CC_CSS_NVM;

	if (ctrl->cap & NVME_CAP_CRMS_CRWMS) {
		u32 crto;

		ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto);
		if (ret) {
			dev_err(ctrl->device, "Reading CRTO failed (%d)\n",
				ret);
			return ret;
		}

		if (ctrl->cap & NVME_CAP_CRMS_CRIMS) {
			ctrl->ctrl_config |= NVME_CC_CRIME;
			timeout = NVME_CRTO_CRIMT(crto);
		} else {
			timeout = NVME_CRTO_CRWMT(crto);
		}
	} else {
		timeout = NVME_CAP_TIMEOUT(ctrl->cap);
	}

	ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
	ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	/* Flush write to device (required if transport is PCI) */
	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config);
	if (ret)
		return ret;

	ctrl->ctrl_config |= NVME_CC_ENABLE;
	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
	return nvme_wait_ready(ctrl, timeout, true);
}
EXPORT_SYMBOL_GPL(nvme_enable_ctrl);

int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
{
	unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ);
	u32 csts;
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device shutdown incomplete; abort shutdown\n");
			return -ENODEV;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);

static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
{
	__le64 ts;
	int ret;

	if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
		return 0;

	ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
	ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
			NULL);
	if (ret)
		dev_warn_once(ctrl->device,
			"could not set timestamp (%d)\n", ret);
	return ret;
}

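/*
 * Opt in to the optional host behaviours the controller advertises: advanced
 * command retry delays (ACRE) when CRDT values are reported, and extended
 * LBA formats (LBAFEE) when ELBAS is set in CTRATT.
 */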
static int nvme_configure_host_options(struct nvme_ctrl *ctrl)
{
	struct nvme_feat_host_behavior *host;
	u8 acre = 0, lbafee = 0;
	int ret;

	/* Don't bother enabling the feature if retry delay is not reported */
	if (ctrl->crdt[0])
		acre = NVME_ENABLE_ACRE;
	if (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)
		lbafee = NVME_ENABLE_LBAFEE;

	if (!acre && !lbafee)
		return 0;

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return 0;

	host->acre = acre;
	host->lbafee = lbafee;
	ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
				host, sizeof(*host), NULL);
	kfree(host);
	return ret;
}

/*
 * The function checks whether the given total (exlat + enlat) latency of
 * a power state allows the latter to be used as an APST transition target.
 * It does so by comparing the latency to the primary and secondary latency
 * tolerances defined by module params. If there's a match, the corresponding
 * timeout value is returned and the matching tolerance index (1 or 2) is
 * reported.
 */
static bool nvme_apst_get_transition_time(u64 total_latency,
		u64 *transition_time, unsigned *last_index)
{
	if (total_latency <= apst_primary_latency_tol_us) {
		if (*last_index == 1)
			return false;
		*last_index = 1;
		*transition_time = apst_primary_timeout_ms;
		return true;
	}
	if (apst_secondary_timeout_ms &&
		total_latency <= apst_secondary_latency_tol_us) {
		if (*last_index <= 2)
			return false;
		*last_index = 2;
		*transition_time = apst_secondary_timeout_ms;
		return true;
	}
	return false;
}

/*
 * APST (Autonomous Power State Transition) lets us program a table of power
 * state transitions that the controller will perform automatically.
 *
 * Depending on module params, one of the two supported techniques will be used:
 *
 * - If the parameters provide explicit timeouts and tolerances, they will be
 *   used to build a table with up to 2 non-operational states to transition to.
 *   The default parameter values were selected based on the values used by
 *   Microsoft's and Intel's NVMe drivers. Yet, since we don't implement dynamic
 *   regeneration of the APST table in the event of switching between external
 *   and battery power, the timeouts and tolerances reflect a compromise
 *   between values used by Microsoft for AC and battery scenarios.
 * - If not, we'll configure the table with a simple heuristic: we are willing
 *   to spend at most 2% of the time transitioning between power states.
 *   Therefore, when running in any given state, we will enter the next
 *   lower-power non-operational state after waiting 50 * (enlat + exlat)
 *   microseconds, as long as that state's exit latency is under the requested
 *   maximum latency.
 *
 * We will not autonomously enter any non-operational state for which the total
 * latency exceeds ps_max_latency_us.
 *
 * Users can set ps_max_latency_us to zero to turn off APST.
 */
static int nvme_configure_apst(struct nvme_ctrl *ctrl)
{
	struct nvme_feat_auto_pst *table;
	unsigned apste = 0;
	u64 max_lat_us = 0;
	__le64 target = 0;
	int max_ps = -1;
	int state;
	int ret;
	unsigned last_lt_index = UINT_MAX;

	/*
	 * If APST isn't supported or if we haven't been initialized yet,
	 * then don't do anything.
	 */
	if (!ctrl->apsta)
		return 0;

	if (ctrl->npss > 31) {
		dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
		return 0;
	}

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return 0;

	if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
		/* Turn off APST. */
		dev_dbg(ctrl->device, "APST disabled\n");
		goto done;
	}

	/*
	 * Walk through all states from lowest- to highest-power.
	 * According to the spec, lower-numbered states use more power.  NPSS,
	 * despite the name, is the index of the lowest-power state, not the
	 * number of states.
	 */
	for (state = (int)ctrl->npss; state >= 0; state--) {
		u64 total_latency_us, exit_latency_us, transition_ms;

		if (target)
			table->entries[state] = target;

		/*
		 * Don't allow transitions to the deepest state if it's quirked
		 * off.
		 */
		if (state == ctrl->npss &&
		    (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
			continue;

		/*
		 * Is this state a useful non-operational state for higher-power
		 * states to autonomously transition to?
		 */
		if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE))
			continue;

		exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
		if (exit_latency_us > ctrl->ps_max_latency_us)
			continue;

		total_latency_us = exit_latency_us +
			le32_to_cpu(ctrl->psd[state].entry_lat);

		/*
		 * This state is good. It can be used as the APST idle target
		 * for higher power states.
		 */
		if (apst_primary_timeout_ms && apst_primary_latency_tol_us) {
			if (!nvme_apst_get_transition_time(total_latency_us,
					&transition_ms, &last_lt_index))
				continue;
		} else {
			transition_ms = total_latency_us + 19;
			do_div(transition_ms, 20);
			if (transition_ms > (1 << 24) - 1)
				transition_ms = (1 << 24) - 1;
		}

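		/*
		 * Each APST table entry encodes, per the NVMe spec, the idle
		 * transition power state in bits 07:03 and the idle time
		 * prior to transition (in milliseconds) in bits 31:08, which
		 * is what the shifts below construct.
		 */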
		target = cpu_to_le64((state << 3) | (transition_ms << 8));
		if (max_ps == -1)
			max_ps = state;
		if (total_latency_us > max_lat_us)
			max_lat_us = total_latency_us;
	}

	if (max_ps == -1)
		dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
	else
		dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
			max_ps, max_lat_us, (int)sizeof(*table), table);
	apste = 1;

done:
	ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
				table, sizeof(*table), NULL);
	if (ret)
		dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
	kfree(table);
	return ret;
}

static void nvme_set_latency_tolerance(struct device *dev, s32 val)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	u64 latency;

	switch (val) {
	case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
	case PM_QOS_LATENCY_ANY:
		latency = U64_MAX;
		break;

	default:
		latency = val;
	}

	if (ctrl->ps_max_latency_us != latency) {
		ctrl->ps_max_latency_us = latency;
		if (ctrl->state == NVME_CTRL_LIVE)
			nvme_configure_apst(ctrl);
	}
}

struct nvme_core_quirk_entry {
	/*
	 * NVMe model and firmware strings are padded with spaces.  For
	 * simplicity, strings in the quirk table are padded with NULLs
	 * instead.
	 */
	u16 vid;
	const char *mn;
	const char *fr;
	unsigned long quirks;
};

static const struct nvme_core_quirk_entry core_quirks[] = {
	{
		/*
		 * This Toshiba device seems to die using any APST states.  See:
		 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
		 */
		.vid = 0x1179,
		.mn = "THNSF5256GPUK TOSHIBA",
		.quirks = NVME_QUIRK_NO_APST,
	},
	{
		/*
		 * This LiteON CL1-3D*-Q11 firmware version has a race
		 * condition associated with actions related to suspend to
		 * idle. LiteON has resolved the problem in future firmware.
		 */
		.vid = 0x14a4,
		.fr = "22301111",
		.quirks = NVME_QUIRK_SIMPLE_SUSPEND,
	},
	{
		/*
		 * This Kioxia CD6-V Series / HPE PE8030 device times out and
		 * aborts I/O during any load, but more easily reproducible
		 * with discards (fstrim).
		 *
		 * The device is left in a state where it is also not possible
		 * to use "nvme set-feature" to disable APST, but booting with
		 * nvme_core.default_ps_max_latency=0 works.
		 */
		.vid = 0x1e0f,
		.mn = "KCD6XVUL6T40",
		.quirks = NVME_QUIRK_NO_APST,
	}
};

/* match is null-terminated but idstr is space-padded. */
static bool string_matches(const char *idstr, const char *match, size_t len)
{
	size_t matchlen;

	if (!match)
		return true;

	matchlen = strlen(match);
	WARN_ON_ONCE(matchlen > len);

	if (memcmp(idstr, match, matchlen))
		return false;

	for (; matchlen < len; matchlen++)
		if (idstr[matchlen] != ' ')
			return false;

	return true;
}

static bool quirk_matches(const struct nvme_id_ctrl *id,
			  const struct nvme_core_quirk_entry *q)
{
	return q->vid == le16_to_cpu(id->vid) &&
		string_matches(id->mn, q->mn, sizeof(id->mn)) &&
		string_matches(id->fr, q->fr, sizeof(id->fr));
}

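/*
 * Use the SUBNQN reported by the controller when it looks valid; otherwise
 * synthesize the spec-defined fallback NQN from the PCI vendor IDs, serial
 * number and model string.
 */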
static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	size_t nqnlen;
	int off;

	if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
		nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
		if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
			strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
			return;
		}

		if (ctrl->vs >= NVME_VS(1, 2, 1))
			dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
	}

	/* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
	off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
			"nqn.2014.08.org.nvmexpress:%04x%04x",
			le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
	memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
	off += sizeof(id->sn);
	memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
	off += sizeof(id->mn);
	memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
}

static void nvme_release_subsystem(struct device *dev)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	if (subsys->instance >= 0)
		ida_free(&nvme_instance_ida, subsys->instance);
	kfree(subsys);
}

static void nvme_destroy_subsystem(struct kref *ref)
{
	struct nvme_subsystem *subsys =
			container_of(ref, struct nvme_subsystem, ref);

	mutex_lock(&nvme_subsystems_lock);
	list_del(&subsys->entry);
	mutex_unlock(&nvme_subsystems_lock);

	ida_destroy(&subsys->ns_ida);
	device_del(&subsys->dev);
	put_device(&subsys->dev);
}

static void nvme_put_subsystem(struct nvme_subsystem *subsys)
{
	kref_put(&subsys->ref, nvme_destroy_subsystem);
}

static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
{
	struct nvme_subsystem *subsys;

	lockdep_assert_held(&nvme_subsystems_lock);

	/*
	 * Fail matches for discovery subsystems. This results
	 * in each discovery controller bound to a unique subsystem.
	 * This avoids issues with validating controller values
	 * that can only be true when there is a single unique subsystem.
	 * There may be multiple and completely independent entities
	 * that provide discovery controllers.
	 */
	if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME))
		return NULL;

	list_for_each_entry(subsys, &nvme_subsystems, entry) {
		if (strcmp(subsys->subnqn, subsysnqn))
			continue;
		if (!kref_get_unless_zero(&subsys->ref))
			continue;
		return subsys;
	}

	return NULL;
}

#define SUBSYS_ATTR_RO(_name, _mode, _show)			\
	struct device_attribute subsys_attr_##_name = \
		__ATTR(_name, _mode, _show, NULL)

static ssize_t nvme_subsys_show_nqn(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	return sysfs_emit(buf, "%s\n", subsys->subnqn);
}
static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);

static ssize_t nvme_subsys_show_type(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	switch (subsys->subtype) {
	case NVME_NQN_DISC:
		return sysfs_emit(buf, "discovery\n");
	case NVME_NQN_NVME:
		return sysfs_emit(buf, "nvm\n");
	default:
		return sysfs_emit(buf, "reserved\n");
	}
}
static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type);

#define nvme_subsys_show_str_function(field)				\
static ssize_t subsys_##field##_show(struct device *dev,		\
			    struct device_attribute *attr, char *buf)	\
{									\
	struct nvme_subsystem *subsys =					\
		container_of(dev, struct nvme_subsystem, dev);		\
	return sysfs_emit(buf, "%.*s\n",				\
			   (int)sizeof(subsys->field), subsys->field);	\
}									\
static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);

nvme_subsys_show_str_function(model);
nvme_subsys_show_str_function(serial);
nvme_subsys_show_str_function(firmware_rev);

static struct attribute *nvme_subsys_attrs[] = {
	&subsys_attr_model.attr,
	&subsys_attr_serial.attr,
	&subsys_attr_firmware_rev.attr,
	&subsys_attr_subsysnqn.attr,
	&subsys_attr_subsystype.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&subsys_attr_iopolicy.attr,
#endif
	NULL,
};

static const struct attribute_group nvme_subsys_attrs_group = {
	.attrs = nvme_subsys_attrs,
};

static const struct attribute_group *nvme_subsys_attrs_groups[] = {
	&nvme_subsys_attrs_group,
	NULL,
};

static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
{
	return ctrl->opts && ctrl->opts->discovery_nqn;
}

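/*
 * A new controller may only join an existing subsystem if its CNTLID is not
 * already in use there and the subsystem actually claims multi-controller
 * support; discovery controllers are exempt from the latter check.
 */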
static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
		struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	struct nvme_ctrl *tmp;

	lockdep_assert_held(&nvme_subsystems_lock);

	list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
		if (nvme_state_terminal(tmp))
			continue;

		if (tmp->cntlid == ctrl->cntlid) {
			dev_err(ctrl->device,
				"Duplicate cntlid %u with %s, subsys %s, rejecting\n",
				ctrl->cntlid, dev_name(tmp->device),
				subsys->subnqn);
			return false;
		}

		if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
		    nvme_discovery_ctrl(ctrl))
			continue;

		dev_err(ctrl->device,
			"Subsystem does not support multiple controllers\n");
		return false;
	}

	return true;
}

static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	struct nvme_subsystem *subsys, *found;
	int ret;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return -ENOMEM;

	subsys->instance = -1;
	mutex_init(&subsys->lock);
	kref_init(&subsys->ref);
	INIT_LIST_HEAD(&subsys->ctrls);
	INIT_LIST_HEAD(&subsys->nsheads);
	nvme_init_subnqn(subsys, ctrl, id);
	memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
	memcpy(subsys->model, id->mn, sizeof(subsys->model));
	memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
	subsys->vendor_id = le16_to_cpu(id->vid);
	subsys->cmic = id->cmic;

	/* Versions prior to 1.4 don't necessarily report a valid type */
	if (id->cntrltype == NVME_CTRL_DISC ||
	    !strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME))
		subsys->subtype = NVME_NQN_DISC;
	else
		subsys->subtype = NVME_NQN_NVME;

	if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) {
		dev_err(ctrl->device,
			"Subsystem %s is not a discovery controller",
			subsys->subnqn);
		kfree(subsys);
		return -EINVAL;
	}
	subsys->awupf = le16_to_cpu(id->awupf);
	nvme_mpath_default_iopolicy(subsys);

	subsys->dev.class = nvme_subsys_class;
	subsys->dev.release = nvme_release_subsystem;
	subsys->dev.groups = nvme_subsys_attrs_groups;
	dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
	device_initialize(&subsys->dev);

	mutex_lock(&nvme_subsystems_lock);
	found = __nvme_find_get_subsystem(subsys->subnqn);
	if (found) {
		put_device(&subsys->dev);
		subsys = found;

		if (!nvme_validate_cntlid(subsys, ctrl, id)) {
			ret = -EINVAL;
			goto out_put_subsystem;
		}
	} else {
		ret = device_add(&subsys->dev);
		if (ret) {
			dev_err(ctrl->device,
				"failed to register subsystem device.\n");
			put_device(&subsys->dev);
			goto out_unlock;
		}
		ida_init(&subsys->ns_ida);
		list_add_tail(&subsys->entry, &nvme_subsystems);
	}

	ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
				dev_name(ctrl->device));
	if (ret) {
		dev_err(ctrl->device,
			"failed to create sysfs link from subsystem.\n");
		goto out_put_subsystem;
	}

	if (!found)
		subsys->instance = ctrl->instance;
	ctrl->subsys = subsys;
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	mutex_unlock(&nvme_subsystems_lock);
	return 0;

out_put_subsystem:
	nvme_put_subsystem(subsys);
out_unlock:
	mutex_unlock(&nvme_subsystems_lock);
	return ret;
}

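/*
 * Build a Get Log Page command: the transfer length is expressed as a
 * zero-based dword count split across NUMDL/NUMDU, and the byte offset into
 * the log is split across LPOL/LPOU.
 */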
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
		void *log, size_t size, u64 offset)
{
	struct nvme_command c = { };
	u32 dwlen = nvme_bytes_to_numd(size);

	c.get_log_page.opcode = nvme_admin_get_log_page;
	c.get_log_page.nsid = cpu_to_le32(nsid);
	c.get_log_page.lid = log_page;
	c.get_log_page.lsp = lsp;
	c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
	c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
	c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
	c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
	c.get_log_page.csi = csi;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
}

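/*
 * The Commands Supported and Effects log is cached per command set in the
 * ctrl->cels xarray, so repeat callers get the copy that was already
 * fetched from the controller.
 */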
static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
				struct nvme_effects_log **log)
{
	struct nvme_effects_log	*cel = xa_load(&ctrl->cels, csi);
	int ret;

	if (cel)
		goto out;

	cel = kzalloc(sizeof(*cel), GFP_KERNEL);
	if (!cel)
		return -ENOMEM;

	ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi,
			cel, sizeof(*cel), 0);
	if (ret) {
		kfree(cel);
		return ret;
	}

	xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
out:
	*log = cel;
	return 0;
}

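/*
 * Fields such as MDTS and WZSL are reported as a power of two in units of
 * the controller's minimum memory page size (2 ^ (12 + MPSMIN) bytes); this
 * helper converts such a value to 512-byte sectors, saturating at UINT_MAX.
 */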
static inline u32 nvme_mps_to_sectors(struct nvme_ctrl *ctrl, u32 units)
{
	u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val;

	if (check_shl_overflow(1U, units + page_shift - 9, &val))
		return UINT_MAX;
	return val;
}

static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
{
	struct nvme_command c = { };
	struct nvme_id_ctrl_nvm *id;
	int ret;

	if (ctrl->oncs & NVME_CTRL_ONCS_DSM) {
		ctrl->max_discard_sectors = UINT_MAX;
		ctrl->max_discard_segments = NVME_DSM_MAX_RANGES;
	} else {
		ctrl->max_discard_sectors = 0;
		ctrl->max_discard_segments = 0;
	}

	/*
	 * Even though NVMe spec explicitly states that MDTS is not applicable
	 * to the write-zeroes, we are cautious and limit the size to the
	 * controllers max_hw_sectors value, which is based on the MDTS field
	 * and possibly other limiting factors.
	 */
	if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) &&
	    !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
		ctrl->max_zeroes_sectors = ctrl->max_hw_sectors;
	else
		ctrl->max_zeroes_sectors = 0;

	if (nvme_ctrl_limited_cns(ctrl))
		return 0;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return 0;

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CS_CTRL;
	c.identify.csi = NVME_CSI_NVM;

	ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
	if (ret)
		goto free_data;

	if (id->dmrl)
		ctrl->max_discard_segments = id->dmrl;
	ctrl->dmrsl = le32_to_cpu(id->dmrsl);
	if (id->wzsl)
		ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);

free_data:
	kfree(id);
	return ret;
}

static int nvme_init_identify(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;
	u32 max_hw_sectors;
	bool prev_apst_enabled;
	int ret;

	ret = nvme_identify_ctrl(ctrl, &id);
	if (ret) {
		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
		return -EIO;
	}

	if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
		ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
		if (ret < 0)
			goto out_free;
	}

	if (!(ctrl->ops->flags & NVME_F_FABRICS))
		ctrl->cntlid = le16_to_cpu(id->cntlid);

	if (!ctrl->identified) {
		unsigned int i;

		ret = nvme_init_subsystem(ctrl, id);
		if (ret)
			goto out_free;

		/*
		 * Check for quirks.  Quirk can depend on firmware version,
		 * so, in principle, the set of quirks present can change
		 * across a reset.  As a possible future enhancement, we
		 * could re-scan for quirks every time we reinitialize
		 * the device, but we'd have to make sure that the driver
		 * behaves intelligently if the quirks change.
		 */
		for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
			if (quirk_matches(id, &core_quirks[i]))
				ctrl->quirks |= core_quirks[i].quirks;
		}
	}

	if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
		dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
		ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
	}

	ctrl->crdt[0] = le16_to_cpu(id->crdt1);
	ctrl->crdt[1] = le16_to_cpu(id->crdt2);
	ctrl->crdt[2] = le16_to_cpu(id->crdt3);

	ctrl->oacs = le16_to_cpu(id->oacs);
	ctrl->oncs = le16_to_cpu(id->oncs);
	ctrl->mtfa = le16_to_cpu(id->mtfa);
	ctrl->oaes = le32_to_cpu(id->oaes);
	ctrl->wctemp = le16_to_cpu(id->wctemp);
	ctrl->cctemp = le16_to_cpu(id->cctemp);

	atomic_set(&ctrl->abort_limit, id->acl + 1);
	ctrl->vwc = id->vwc;
	if (id->mdts)
		max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts);
	else
		max_hw_sectors = UINT_MAX;
	ctrl->max_hw_sectors =
		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);

	nvme_set_queue_limits(ctrl, ctrl->admin_q);
	ctrl->sgls = le32_to_cpu(id->sgls);
	ctrl->kas = le16_to_cpu(id->kas);
	ctrl->max_namespaces = le32_to_cpu(id->mnan);
	ctrl->ctratt = le32_to_cpu(id->ctratt);

	ctrl->cntrltype = id->cntrltype;
	ctrl->dctype = id->dctype;

	if (id->rtd3e) {
		/* us -> s */
		u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC;

		ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
						 shutdown_timeout, 60);

		if (ctrl->shutdown_timeout != shutdown_timeout)
			dev_info(ctrl->device,
				 "Shutdown timeout set to %u seconds\n",
				 ctrl->shutdown_timeout);
	} else
		ctrl->shutdown_timeout = shutdown_timeout;

	ctrl->npss = id->npss;
	ctrl->apsta = id->apsta;
	prev_apst_enabled = ctrl->apst_enabled;
	if (ctrl->quirks & NVME_QUIRK_NO_APST) {
		if (force_apst && id->apsta) {
			dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
			ctrl->apst_enabled = true;
		} else {
			ctrl->apst_enabled = false;
		}
	} else {
		ctrl->apst_enabled = id->apsta;
	}
	memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		ctrl->icdoff = le16_to_cpu(id->icdoff);
		ctrl->ioccsz = le32_to_cpu(id->ioccsz);
		ctrl->iorcsz = le32_to_cpu(id->iorcsz);
		ctrl->maxcmd = le16_to_cpu(id->maxcmd);

		/*
		 * In fabrics we need to verify the cntlid matches the
		 * admin connect
		 */
		if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
			dev_err(ctrl->device,
				"Mismatching cntlid: Connect %u vs Identify "
				"%u, rejecting\n",
				ctrl->cntlid, le16_to_cpu(id->cntlid));
			ret = -EINVAL;
			goto out_free;
		}

		if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
			dev_err(ctrl->device,
				"keep-alive support is mandatory for fabrics\n");
			ret = -EINVAL;
			goto out_free;
		}
	} else {
		ctrl->hmpre = le32_to_cpu(id->hmpre);
		ctrl->hmmin = le32_to_cpu(id->hmmin);
		ctrl->hmminds = le32_to_cpu(id->hmminds);
		ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
	}

	ret = nvme_mpath_init_identify(ctrl, id);
	if (ret < 0)
		goto out_free;

	if (ctrl->apst_enabled && !prev_apst_enabled)
		dev_pm_qos_expose_latency_tolerance(ctrl->device);
	else if (!ctrl->apst_enabled && prev_apst_enabled)
		dev_pm_qos_hide_latency_tolerance(ctrl->device);

out_free:
	kfree(id);
	return ret;
}

/*
 * Initialize the cached copies of the Identify data and various controller
 * register in our nvme_ctrl structure.  This should be called as soon as
 * the admin queue is fully up and running.
 */
int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
	if (ret) {
		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
		return ret;
	}

	ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);

	if (ctrl->vs >= NVME_VS(1, 1, 0))
		ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);

	ret = nvme_init_identify(ctrl);
	if (ret)
		return ret;

	ret = nvme_configure_apst(ctrl);
	if (ret < 0)
		return ret;

	ret = nvme_configure_timestamp(ctrl);
	if (ret < 0)
		return ret;

	ret = nvme_configure_host_options(ctrl);
	if (ret < 0)
		return ret;

	if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
		ret = nvme_hwmon_init(ctrl);
		if (ret < 0)
			return ret;
	}

	ctrl->identified = true;

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish);

static int nvme_dev_open(struct inode *inode, struct file *file)
{
	struct nvme_ctrl *ctrl =
		container_of(inode->i_cdev, struct nvme_ctrl, cdev);

	switch (ctrl->state) {
	case NVME_CTRL_LIVE:
		break;
	default:
		return -EWOULDBLOCK;
	}

	nvme_get_ctrl(ctrl);
	if (!try_module_get(ctrl->ops->module)) {
		nvme_put_ctrl(ctrl);
		return -EINVAL;
	}

	file->private_data = ctrl;
	return 0;
}

static int nvme_dev_release(struct inode *inode, struct file *file)
{
	struct nvme_ctrl *ctrl =
		container_of(inode->i_cdev, struct nvme_ctrl, cdev);

	module_put(ctrl->ops->module);
	nvme_put_ctrl(ctrl);
	return 0;
}

static const struct file_operations nvme_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_dev_open,
	.release	= nvme_dev_release,
	.unlocked_ioctl	= nvme_dev_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.uring_cmd	= nvme_dev_uring_cmd,
};

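/*
 * Writing anything to this attribute, e.g.
 * "echo 1 > /sys/class/nvme/nvme0/reset_controller" (nvme0 being just an
 * example instance name), triggers a synchronous controller reset.
 */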
static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

	ret = nvme_reset_ctrl_sync(ctrl);
	if (ret < 0)
		return ret;
	return count;
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);

static ssize_t nvme_sysfs_rescan(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	nvme_queue_scan(ctrl);
	return count;
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);

static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (disk->fops == &nvme_bdev_ops)
		return nvme_get_ns_from_dev(dev)->head;
	else
		return disk->private_data;
}

static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	struct nvme_ns_ids *ids = &head->ids;
	struct nvme_subsystem *subsys = head->subsys;
	int serial_len = sizeof(subsys->serial);
	int model_len = sizeof(subsys->model);

	if (!uuid_is_null(&ids->uuid))
		return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid);

	if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
		return sysfs_emit(buf, "eui.%16phN\n", ids->nguid);

	if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
		return sysfs_emit(buf, "eui.%8phN\n", ids->eui64);

	while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
				  subsys->serial[serial_len - 1] == '\0'))
		serial_len--;
	while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
				 subsys->model[model_len - 1] == '\0'))
		model_len--;

	return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
		serial_len, subsys->serial, model_len, subsys->model,
		head->ns_id);
}
static DEVICE_ATTR_RO(wwid);

static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
}
static DEVICE_ATTR_RO(nguid);

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	/* For backward compatibility expose the NGUID to userspace if
	 * we have no UUID set
	 */
	if (uuid_is_null(&ids->uuid)) {
		dev_warn_ratelimited(dev,
			"No UUID available providing old NGUID\n");
		return sysfs_emit(buf, "%pU\n", ids->nguid);
	}
	return sysfs_emit(buf, "%pU\n", &ids->uuid);
}
static DEVICE_ATTR_RO(uuid);

static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
}
static DEVICE_ATTR_RO(eui);

static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
}
static DEVICE_ATTR_RO(nsid);

static struct attribute *nvme_ns_id_attrs[] = {
	&dev_attr_wwid.attr,
	&dev_attr_uuid.attr,
	&dev_attr_nguid.attr,
	&dev_attr_eui.attr,
	&dev_attr_nsid.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&dev_attr_ana_grpid.attr,
	&dev_attr_ana_state.attr,
#endif
	NULL,
};

static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	if (a == &dev_attr_uuid.attr) {
		if (uuid_is_null(&ids->uuid) &&
		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_nguid.attr) {
		if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
		if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
			return 0;
	}
#ifdef CONFIG_NVME_MULTIPATH
	if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
		if (dev_to_disk(dev)->fops != &nvme_bdev_ops) /* per-path attr */
			return 0;
		if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
			return 0;
	}
#endif
	return a->mode;
}

static const struct attribute_group nvme_ns_id_attr_group = {
	.attrs		= nvme_ns_id_attrs,
	.is_visible	= nvme_ns_id_attrs_are_visible,
};

const struct attribute_group *nvme_ns_id_attr_groups[] = {
	&nvme_ns_id_attr_group,
	NULL,
};

#define nvme_show_str_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
        struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
        return sysfs_emit(buf, "%.*s\n",					\
		(int)sizeof(ctrl->subsys->field), ctrl->subsys->field);		\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);

#define nvme_show_int_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
        struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
        return sysfs_emit(buf, "%d\n", ctrl->field);				\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_int_function(cntlid);
nvme_show_int_function(numa_node);
nvme_show_int_function(queue_count);
nvme_show_int_function(sqsize);
nvme_show_int_function(kato);

static ssize_t nvme_sysfs_delete(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (device_remove_file_self(dev, attr))
		nvme_delete_ctrl_sync(ctrl);
	return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);

static ssize_t nvme_sysfs_show_transport(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->ops->name);
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);

static ssize_t nvme_sysfs_show_state(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	static const char *const state_name[] = {
		[NVME_CTRL_NEW]		= "new",
		[NVME_CTRL_LIVE]	= "live",
		[NVME_CTRL_RESETTING]	= "resetting",
		[NVME_CTRL_CONNECTING]	= "connecting",
		[NVME_CTRL_DELETING]	= "deleting",
		[NVME_CTRL_DELETING_NOIO]= "deleting (no IO)",
		[NVME_CTRL_DEAD]	= "dead",
	};

	if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
	    state_name[ctrl->state])
		return sysfs_emit(buf, "%s\n", state_name[ctrl->state]);

	return sysfs_emit(buf, "unknown state\n");
}

static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);

static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
}
static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);

static ssize_t nvme_sysfs_show_hostid(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
}
static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);

static ssize_t nvme_sysfs_show_address(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);

static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (ctrl->opts->max_reconnects == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n",
			  opts->max_reconnects * opts->reconnect_delay);
}

static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ctrl_loss_tmo, err;

	err = kstrtoint(buf, 10, &ctrl_loss_tmo);
	if (err)
		return -EINVAL;

	if (ctrl_loss_tmo < 0)
		opts->max_reconnects = -1;
	else
		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
						opts->reconnect_delay);
	return count;
}
static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR,
	nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store);

static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->opts->reconnect_delay == -1)
3517 3518
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay);
3519 3520 3521 3522 3523 3524 3525 3526 3527 3528
}

static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	unsigned int v;
	int err;

	err = kstrtou32(buf, 10, &v);
3529 3530
	if (err)
		return err;
3531 3532 3533 3534 3535 3536 3537

	ctrl->opts->reconnect_delay = v;
	return count;
}
static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR,
	nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store);

3538 3539 3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567
static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->opts->fast_io_fail_tmo == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo);
}

static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int fast_io_fail_tmo, err;

	err = kstrtoint(buf, 10, &fast_io_fail_tmo);
	if (err)
		return -EINVAL;

	if (fast_io_fail_tmo < 0)
		opts->fast_io_fail_tmo = -1;
	else
		opts->fast_io_fail_tmo = fast_io_fail_tmo;
	return count;
}
static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
	nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store);

static ssize_t cntrltype_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	static const char * const type[] = {
		[NVME_CTRL_IO] = "io\n",
		[NVME_CTRL_DISC] = "discovery\n",
		[NVME_CTRL_ADMIN] = "admin\n",
	};
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype])
		return sysfs_emit(buf, "reserved\n");

	return sysfs_emit(buf, type[ctrl->cntrltype]);
}
static DEVICE_ATTR_RO(cntrltype);

static ssize_t dctype_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	static const char * const type[] = {
		[NVME_DCTYPE_NOT_REPORTED] = "none\n",
		[NVME_DCTYPE_DDC] = "ddc\n",
		[NVME_DCTYPE_CDC] = "cdc\n",
	};
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype])
		return sysfs_emit(buf, "reserved\n");

	return sysfs_emit(buf, type[ctrl->dctype]);
}
static DEVICE_ATTR_RO(dctype);

static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
	&dev_attr_rescan_controller.attr,
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
	&dev_attr_cntlid.attr,
	&dev_attr_delete_controller.attr,
	&dev_attr_transport.attr,
	&dev_attr_subsysnqn.attr,
	&dev_attr_address.attr,
	&dev_attr_state.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_queue_count.attr,
	&dev_attr_sqsize.attr,
	&dev_attr_hostnqn.attr,
	&dev_attr_hostid.attr,
	&dev_attr_ctrl_loss_tmo.attr,
	&dev_attr_reconnect_delay.attr,
	&dev_attr_fast_io_fail_tmo.attr,
	&dev_attr_kato.attr,
	&dev_attr_cntrltype.attr,
	&dev_attr_dctype.attr,
	NULL
};

static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
		return 0;
	if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
		return 0;
	if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_hostid.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
		return 0;

	return a->mode;
}

static const struct attribute_group nvme_dev_attrs_group = {
	.attrs		= nvme_dev_attrs,
	.is_visible	= nvme_dev_attrs_are_visible,
};

static const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
	NULL,
};

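/*
 * Look up a live ns_head in the controller's subsystem that already uses
 * @nsid and may be shared with this controller; a reference is taken on the
 * returned head.  Caller must hold subsys->lock.
 */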
static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl,
		unsigned nsid)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&ctrl->subsys->lock);

	list_for_each_entry(h, &ctrl->subsys->nsheads, entry) {
		/*
		 * Private namespaces can share NSIDs under some conditions.
		 * In that case we can't use the same ns_head for namespaces
		 * with the same NSID.
		 */
		if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h))
			continue;
		if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
			return h;
	}

	return NULL;
}

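/*
 * Return -EINVAL if any ns_head already registered in @subsys uses the same
 * UUID, NGUID or EUI-64 as @ids.  Caller must hold subsys->lock.
 */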
static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys,
		struct nvme_ns_ids *ids)
{
	bool has_uuid = !uuid_is_null(&ids->uuid);
	bool has_nguid = memchr_inv(ids->nguid, 0, sizeof(ids->nguid));
	bool has_eui64 = memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);

	list_for_each_entry(h, &subsys->nsheads, entry) {
		if (has_uuid && uuid_equal(&ids->uuid, &h->ids.uuid))
			return -EINVAL;
		if (has_nguid &&
		    memcmp(&ids->nguid, &h->ids.nguid, sizeof(ids->nguid)) == 0)
			return -EINVAL;
		if (has_eui64 &&
		    memcmp(&ids->eui64, &h->ids.eui64, sizeof(ids->eui64)) == 0)
			return -EINVAL;
	}

	return 0;
}

static void nvme_cdev_rel(struct device *dev)
{
	ida_free(&nvme_ns_chr_minor_ida, MINOR(dev->devt));
}

void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device)
{
	cdev_device_del(cdev, cdev_device);
	put_device(cdev_device);
}

int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
		const struct file_operations *fops, struct module *owner)
{
	int minor, ret;

	minor = ida_alloc(&nvme_ns_chr_minor_ida, GFP_KERNEL);
	if (minor < 0)
		return minor;
	cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
	cdev_device->class = nvme_ns_chr_class;
	cdev_device->release = nvme_cdev_rel;
	device_initialize(cdev_device);
	cdev_init(cdev, fops);
	cdev->owner = owner;
	ret = cdev_device_add(cdev, cdev_device);
	if (ret)
		put_device(cdev_device);

	return ret;
}

static int nvme_ns_chr_open(struct inode *inode, struct file *file)
{
	return nvme_ns_open(container_of(inode->i_cdev, struct nvme_ns, cdev));
}

static int nvme_ns_chr_release(struct inode *inode, struct file *file)
{
	nvme_ns_release(container_of(inode->i_cdev, struct nvme_ns, cdev));
	return 0;
}

static const struct file_operations nvme_ns_chr_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_ns_chr_open,
	.release	= nvme_ns_chr_release,
	.unlocked_ioctl	= nvme_ns_chr_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.uring_cmd	= nvme_ns_chr_uring_cmd,
};

static int nvme_add_ns_cdev(struct nvme_ns *ns)
{
	int ret;

	ns->cdev_device.parent = ns->ctrl->device;
	ret = dev_set_name(&ns->cdev_device, "ng%dn%d",
			   ns->ctrl->instance, ns->head->instance);
	if (ret)
		return ret;

	return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
			     ns->ctrl->ops->module);
}

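/*
 * Allocate and initialise a new ns_head for @nsid (including its per-node
 * path pointers when multipath is configured), fetch the command set
 * specific effects log, and link the head into the subsystem's nsheads
 * list.  Called with subsys->lock held.
 */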
static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
		unsigned nsid, struct nvme_ns_ids *ids)
{
	struct nvme_ns_head *head;
	size_t size = sizeof(*head);
	int ret = -ENOMEM;

#ifdef CONFIG_NVME_MULTIPATH
	size += num_possible_nodes() * sizeof(struct nvme_ns *);
#endif

	head = kzalloc(size, GFP_KERNEL);
	if (!head)
		goto out;
	ret = ida_alloc_min(&ctrl->subsys->ns_ida, 1, GFP_KERNEL);
	if (ret < 0)
		goto out_free_head;
	head->instance = ret;
	INIT_LIST_HEAD(&head->list);
	ret = init_srcu_struct(&head->srcu);
	if (ret)
		goto out_ida_remove;
	head->subsys = ctrl->subsys;
	head->ns_id = nsid;
	head->ids = *ids;
	kref_init(&head->ref);

	if (head->ids.csi) {
		ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects);
		if (ret)
			goto out_cleanup_srcu;
	} else
		head->effects = ctrl->effects;

	ret = nvme_mpath_alloc_disk(ctrl, head);
	if (ret)
		goto out_cleanup_srcu;

	list_add_tail(&head->entry, &ctrl->subsys->nsheads);

	kref_get(&ctrl->subsys->ref);

	return head;
out_cleanup_srcu:
	cleanup_srcu_struct(&head->srcu);
out_ida_remove:
	ida_free(&ctrl->subsys->ns_ida, head->instance);
out_free_head:
	kfree(head);
out:
	if (ret > 0)
		ret = blk_status_to_errno(nvme_error_status(ret));
	return ERR_PTR(ret);
}

static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this,
		struct nvme_ns_ids *ids)
{
	struct nvme_subsystem *s;
	int ret = 0;

	/*
	 * Note that this check is racy as we try to avoid holding the global
	 * lock over the whole ns_head creation.  But it is only intended as
	 * a sanity check anyway.
	 */
	mutex_lock(&nvme_subsystems_lock);
	list_for_each_entry(s, &nvme_subsystems, entry) {
		if (s == this)
			continue;
		mutex_lock(&s->lock);
		ret = nvme_subsys_check_duplicate_ids(s, ids);
		mutex_unlock(&s->lock);
		if (ret)
			break;
	}
	mutex_unlock(&nvme_subsystems_lock);

	return ret;
}

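/*
 * Bind @ns to a namespace head: reuse an existing head for @nsid if the
 * identifiers match and sharing is allowed, otherwise allocate a new one.
 * Duplicate identifiers within or across subsystems cause the namespace to
 * be rejected.
 */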
static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
		struct nvme_ns_ids *ids, bool is_shared)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_ns_head *head = NULL;
	int ret;

	ret = nvme_global_check_duplicate_ids(ctrl->subsys, ids);
	if (ret) {
		dev_err(ctrl->device,
			"globally duplicate IDs for nsid %d\n", nsid);
		nvme_print_device_info(ctrl);
		return ret;
	}

	mutex_lock(&ctrl->subsys->lock);
	head = nvme_find_ns_head(ctrl, nsid);
	if (!head) {
		ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, ids);
		if (ret) {
			dev_err(ctrl->device,
				"duplicate IDs in subsystem for nsid %d\n",
				nsid);
			goto out_unlock;
		}
		head = nvme_alloc_ns_head(ctrl, nsid, ids);
		if (IS_ERR(head)) {
			ret = PTR_ERR(head);
			goto out_unlock;
		}
		head->shared = is_shared;
	} else {
		ret = -EINVAL;
		if (!is_shared || !head->shared) {
			dev_err(ctrl->device,
				"Duplicate unshared namespace %d\n", nsid);
			goto out_put_ns_head;
		}
		if (!nvme_ns_ids_equal(&head->ids, ids)) {
			dev_err(ctrl->device,
				"IDs don't match for shared namespace %d\n",
					nsid);
			goto out_put_ns_head;
		}

		if (!multipath && !list_empty(&head->list)) {
			dev_warn(ctrl->device,
				"Found shared namespace %d, but multipathing not supported.\n",
				nsid);
			dev_warn_once(ctrl->device,
				"Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0.\n");
		}
	}

	list_add_tail_rcu(&ns->siblings, &head->list);
	ns->head = head;
	mutex_unlock(&ctrl->subsys->lock);
	return 0;

out_put_ns_head:
	nvme_put_ns_head(head);
out_unlock:
	mutex_unlock(&ctrl->subsys->lock);
	return ret;
}

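/*
 * Return the namespace with the given NSID with a reference held, or NULL.
 * The per-controller namespace list is kept sorted by NSID, so the walk can
 * stop early once it passes the requested ID.
 */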
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns, *ret = NULL;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->head->ns_id == nsid) {
			if (!nvme_get_ns(ns))
				continue;
			ret = ns;
			break;
		}
		if (ns->head->ns_id > nsid)
			break;
	}
	up_read(&ctrl->namespaces_rwsem);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);

/*
 * Add the namespace to the controller list while keeping the list ordered.
 */
static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
{
	struct nvme_ns *tmp;

	list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
		if (tmp->head->ns_id < ns->head->ns_id) {
			list_add(&ns->list, &tmp->list);
			return;
		}
	}
	list_add(&ns->list, &ns->ctrl->namespaces);
}

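/*
 * Create a new namespace for @nsid: identify it, allocate the gendisk and
 * request queue, attach it to (or create) its ns_head, pick a disk name that
 * is unique across multipath configurations, and register the block device
 * (plus the ng* character device for namespaces not handled through a
 * multipath head).
 */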
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
		struct nvme_ns_ids *ids)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	struct nvme_id_ns *id;
	int node = ctrl->numa_node;

	if (nvme_identify_ns(ctrl, nsid, ids, &id))
		return;

	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
	if (!ns)
		goto out_free_id;

	disk = blk_mq_alloc_disk(ctrl->tagset, ns);
	if (IS_ERR(disk))
		goto out_free_ns;
	disk->fops = &nvme_bdev_ops;
	disk->private_data = ns;

	ns->disk = disk;
	ns->queue = disk->queue;

	if (ctrl->opts && ctrl->opts->data_digest)
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);

	blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
	if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
		blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);

	ns->ctrl = ctrl;
	kref_init(&ns->kref);

	if (nvme_init_ns_head(ns, nsid, ids, id->nmic & NVME_NS_NMIC_SHARED))
		goto out_cleanup_disk;

	/*
	 * If multipathing is enabled, the device name for all disks and not
	 * just those that represent shared namespaces needs to be based on the
	 * subsystem instance.  Using the controller instance for private
	 * namespaces could lead to naming collisions between shared and private
	 * namespaces if they don't use a common numbering scheme.
	 *
	 * If multipathing is not enabled, disk names must use the controller
	 * instance as shared namespaces will show up as multiple block
	 * devices.
	 */
	if (ns->head->disk) {
		sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
			ctrl->instance, ns->head->instance);
		disk->flags |= GENHD_FL_HIDDEN;
	} else if (multipath) {
		sprintf(disk->disk_name, "nvme%dn%d", ctrl->subsys->instance,
			ns->head->instance);
	} else {
		sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,
			ns->head->instance);
	}

	if (nvme_update_ns_info(ns, id))
		goto out_unlink_ns;

	down_write(&ctrl->namespaces_rwsem);
	nvme_ns_add_to_ctrl_list(ns);
	up_write(&ctrl->namespaces_rwsem);
	nvme_get_ctrl(ctrl);

	if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups))
		goto out_cleanup_ns_from_list;

	if (!nvme_ns_head_multipath(ns->head))
		nvme_add_ns_cdev(ns);

	nvme_mpath_add_disk(ns, id);
	nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
	kfree(id);

	return;

 out_cleanup_ns_from_list:
	nvme_put_ctrl(ctrl);
	down_write(&ctrl->namespaces_rwsem);
	list_del_init(&ns->list);
	up_write(&ctrl->namespaces_rwsem);
 out_unlink_ns:
	mutex_lock(&ctrl->subsys->lock);
	list_del_rcu(&ns->siblings);
	if (list_empty(&ns->head->list))
		list_del_init(&ns->head->entry);
	mutex_unlock(&ctrl->subsys->lock);
	nvme_put_ns_head(ns->head);
 out_cleanup_disk:
	blk_cleanup_disk(disk);
 out_free_ns:
	kfree(ns);
 out_free_id:
	kfree(id);
}

static void nvme_ns_remove(struct nvme_ns *ns)
{
	bool last_path = false;

	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
		return;

	clear_bit(NVME_NS_READY, &ns->flags);
	set_capacity(ns->disk, 0);
	nvme_fault_inject_fini(&ns->fault_inject);

	/*
	 * Ensure that !NVME_NS_READY is seen by other threads to prevent
	 * this ns going back into current_path.
	 */
	synchronize_srcu(&ns->head->srcu);

	/* wait for concurrent submissions */
	if (nvme_mpath_clear_current_path(ns))
		synchronize_srcu(&ns->head->srcu);

	mutex_lock(&ns->ctrl->subsys->lock);
	list_del_rcu(&ns->siblings);
	if (list_empty(&ns->head->list)) {
		list_del_init(&ns->head->entry);
		last_path = true;
	}
	mutex_unlock(&ns->ctrl->subsys->lock);

	/* guarantee not available in head->list */
	synchronize_rcu();

	if (!nvme_ns_head_multipath(ns->head))
		nvme_cdev_del(&ns->cdev, &ns->cdev_device);
	del_gendisk(ns->disk);
	blk_cleanup_queue(ns->queue);

	down_write(&ns->ctrl->namespaces_rwsem);
	list_del_init(&ns->list);
	up_write(&ns->ctrl->namespaces_rwsem);

	if (last_path)
		nvme_mpath_shutdown_disk(ns->head);
	nvme_put_ns(ns);
}

static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
{
	struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid);

	if (ns) {
		nvme_ns_remove(ns);
		nvme_put_ns(ns);
	}
}

static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids)
{
	struct nvme_id_ns *id;
	int ret = NVME_SC_INVALID_NS | NVME_SC_DNR;

	if (test_bit(NVME_NS_DEAD, &ns->flags))
		goto out;

	ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, ids, &id);
	if (ret)
		goto out;

	ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
	if (!nvme_ns_ids_equal(&ns->head->ids, ids)) {
		dev_err(ns->ctrl->device,
			"identifiers changed for nsid %d\n", ns->head->ns_id);
		goto out_free_id;
	}

	ret = nvme_update_ns_info(ns, id);

out_free_id:
	kfree(id);
out:
	/*
	 * Only remove the namespace if we got a fatal error back from the
	 * device, otherwise ignore the error and just move on.
	 *
	 * TODO: we should probably schedule a delayed retry here.
	 */
	if (ret > 0 && (ret & NVME_SC_DNR))
		nvme_ns_remove(ns);
}

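/*
 * Handle a single NSID reported during a scan: revalidate an existing
 * namespace against its current identifiers, or allocate a new one for the
 * command sets we support.  Namespaces that are not yet ready are skipped;
 * a later AEN will restart the scan.
 */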
static void nvme_validate_or_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns_ids ids = { };
	struct nvme_id_ns_cs_indep *id;
	struct nvme_ns *ns;
	bool ready = true;

	if (nvme_identify_ns_descs(ctrl, nsid, &ids))
		return;

	/*
	 * Check if the namespace is ready.  If not, ignore it, we will get an
	 * AEN once it becomes ready and restart the scan.
	 */
	if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) &&
	    !nvme_identify_ns_cs_indep(ctrl, nsid, &id)) {
		ready = id->nstat & NVME_NSTAT_NRDY;
		kfree(id);
	}

	if (!ready)
		return;

	ns = nvme_find_get_ns(ctrl, nsid);
	if (ns) {
		nvme_validate_ns(ns, &ids);
		nvme_put_ns(ns);
		return;
	}

	switch (ids.csi) {
	case NVME_CSI_NVM:
		nvme_alloc_ns(ctrl, nsid, &ids);
		break;
	case NVME_CSI_ZNS:
		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			dev_warn(ctrl->device,
				"nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
				nsid);
			break;
		}
		if (!nvme_multi_css(ctrl)) {
			dev_warn(ctrl->device,
				"command set not reported for nsid: %d\n",
				nsid);
			break;
		}
		nvme_alloc_ns(ctrl, nsid, &ids);
		break;
	default:
		dev_warn(ctrl->device, "unknown csi %u for nsid %u\n",
			ids.csi, nsid);
		break;
	}
}

static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					unsigned nsid)
{
	struct nvme_ns *ns, *next;
	LIST_HEAD(rm_list);

	down_write(&ctrl->namespaces_rwsem);
	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
		if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
			list_move_tail(&ns->list, &rm_list);
	}
	up_write(&ctrl->namespaces_rwsem);

	list_for_each_entry_safe(ns, next, &rm_list, list)
		nvme_ns_remove(ns);

}

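/*
 * Scan namespaces using the Identify Active Namespace ID list, fetched in
 * 1024-entry chunks starting after the last NSID seen.  NSIDs missing from
 * the list are removed, and a zero entry terminates the scan.  Returns
 * -EOPNOTSUPP for controllers that cannot report the list.
 */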
static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
{
	const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32);
	__le32 *ns_list;
	u32 prev = 0;
	int ret = 0, i;

	if (nvme_ctrl_limited_cns(ctrl))
		return -EOPNOTSUPP;

	ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!ns_list)
		return -ENOMEM;

	for (;;) {
		struct nvme_command cmd = {
			.identify.opcode	= nvme_admin_identify,
			.identify.cns		= NVME_ID_CNS_NS_ACTIVE_LIST,
			.identify.nsid		= cpu_to_le32(prev),
		};

		ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list,
					    NVME_IDENTIFY_DATA_SIZE);
		if (ret) {
			dev_warn(ctrl->device,
				"Identify NS List failed (status=0x%x)\n", ret);
			goto free;
		}

		for (i = 0; i < nr_entries; i++) {
			u32 nsid = le32_to_cpu(ns_list[i]);

			if (!nsid)	/* end of the list? */
				goto out;
			nvme_validate_or_alloc_ns(ctrl, nsid);
			while (++prev < nsid)
				nvme_ns_remove_by_nsid(ctrl, prev);
		}
	}
 out:
	nvme_remove_invalid_namespaces(ctrl, prev);
 free:
	kfree(ns_list);
	return ret;
}

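/*
 * Fallback scan for controllers that cannot report the active namespace
 * list: probe every NSID from 1 up to the Number of Namespaces (NN) value
 * reported by Identify Controller.
 */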
static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;
	u32 nn, i;

	if (nvme_identify_ctrl(ctrl, &id))
		return;
	nn = le32_to_cpu(id->nn);
	kfree(id);

	for (i = 1; i <= nn; i++)
		nvme_validate_or_alloc_ns(ctrl, i);

	nvme_remove_invalid_namespaces(ctrl, nn);
}

static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
{
	size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
	__le32 *log;
	int error;

	log = kzalloc(log_size, GFP_KERNEL);
	if (!log)
		return;

	/*
	 * We need to read the log to clear the AEN, but we don't want to rely
	 * on it for the changed namespace information as userspace could have
	 * raced with us in reading the log page, which could cause us to miss
	 * updates.
	 */
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0,
			NVME_CSI_NVM, log, log_size, 0);
	if (error)
		dev_warn(ctrl->device,
			"reading changed ns log failed: %d\n", error);

	kfree(log);
}

static void nvme_scan_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, scan_work);
	int ret;

	/* No tagset on a live ctrl means IO queues could not be created */
	if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
		return;

	/*
	 * Identify controller limits can change at controller reset due to a
	 * new firmware download, and even though it is not common we cannot
	 * ignore such a scenario. The controller's non-mdts limits are
	 * reported in units of logical blocks that depend on the format of
	 * the attached namespace. Hence re-read the limits at the time of ns
	 * allocation.
	 */
	ret = nvme_init_non_mdts_limits(ctrl);
	if (ret < 0) {
		dev_warn(ctrl->device,
			"reading non-mdts-limits failed: %d\n", ret);
		return;
	}

	if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
		dev_info(ctrl->device, "rescanning namespaces.\n");
		nvme_clear_changed_ns_log(ctrl);
	}

	mutex_lock(&ctrl->scan_lock);
	if (nvme_scan_ns_list(ctrl) != 0)
		nvme_scan_ns_sequential(ctrl);
	mutex_unlock(&ctrl->scan_lock);
}

/*
 * This function iterates the namespace list unlocked to allow recovery from
 * controller failure. It is up to the caller to ensure the namespace list is
 * not modified by scan work while this function is executing.
 */
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns, *next;
	LIST_HEAD(ns_list);

	/*
	 * make sure to requeue I/O to all namespaces as these
	 * might result from the scan itself and must complete
	 * for the scan_work to make progress
	 */
	nvme_mpath_clear_ctrl_paths(ctrl);

	/* prevent racing with ns scanning */
	flush_work(&ctrl->scan_work);

	/*
	 * The dead state indicates the controller was not gracefully
	 * disconnected. In that case, we won't be able to flush any data while
	 * removing the namespaces' disks; fail all the queues now to avoid
	 * potentially having to clean up the failed sync later.
	 */
	if (ctrl->state == NVME_CTRL_DEAD)
		nvme_kill_queues(ctrl);

	/* this is a no-op when called from the controller reset handler */
	nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);

	down_write(&ctrl->namespaces_rwsem);
	list_splice_init(&ctrl->namespaces, &ns_list);
	up_write(&ctrl->namespaces_rwsem);

	list_for_each_entry_safe(ns, next, &ns_list, list)
		nvme_ns_remove(ns);
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);

static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct nvme_ctrl *ctrl =
		container_of(dev, struct nvme_ctrl, ctrl_device);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ret;

	ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name);
	if (ret)
		return ret;

	if (opts) {
		ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr);
		if (ret)
			return ret;

		ret = add_uevent_var(env, "NVME_TRSVCID=%s",
				opts->trsvcid ?: "none");
		if (ret)
			return ret;

		ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s",
				opts->host_traddr ?: "none");
		if (ret)
			return ret;

		ret = add_uevent_var(env, "NVME_HOST_IFACE=%s",
				opts->host_iface ?: "none");
	}
	return ret;
}

static void nvme_change_uevent(struct nvme_ctrl *ctrl, char *envdata)
{
	char *envp[2] = { envdata, NULL };

	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
}

static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
{
	char *envp[2] = { NULL, NULL };
	u32 aen_result = ctrl->aen_result;

	ctrl->aen_result = 0;
	if (!aen_result)
		return;

	envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
	if (!envp[0])
		return;
	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
	kfree(envp[0]);
}

static void nvme_async_event_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, async_event_work);

	nvme_aen_uevent(ctrl);

	/*
	 * The transport drivers must guarantee AER submission here is safe by
	 * flushing ctrl async_event_work after changing the controller state
	 * from LIVE and before freeing the admin queue.
	*/
	if (ctrl->state == NVME_CTRL_LIVE)
		ctrl->ops->submit_async_event(ctrl);
}

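/*
 * Processing Paused: true while the controller is enabled and CSTS.PP is
 * set, i.e. a firmware activation is still in progress.
 */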
static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
{

	u32 csts;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
		return false;

	if (csts == ~0)
		return false;

	return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
}

static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
{
	struct nvme_fw_slot_info_log *log;

	log = kmalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return;

	if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM,
			log, sizeof(*log), 0))
		dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
	kfree(log);
}

static void nvme_fw_act_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work,
				struct nvme_ctrl, fw_act_work);
	unsigned long fw_act_timeout;

	if (ctrl->mtfa)
		fw_act_timeout = jiffies +
				msecs_to_jiffies(ctrl->mtfa * 100);
	else
		fw_act_timeout = jiffies +
				msecs_to_jiffies(admin_timeout * 1000);

	nvme_stop_queues(ctrl);
	while (nvme_ctrl_pp_status(ctrl)) {
		if (time_after(jiffies, fw_act_timeout)) {
			dev_warn(ctrl->device,
				"Fw activation timeout, reset controller\n");
			nvme_try_sched_reset(ctrl);
			return;
		}
		msleep(100);
	}

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
		return;

	nvme_start_queues(ctrl);
	/* read FW slot information to clear the AER */
	nvme_get_fw_slot_info(ctrl);
}

static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
	u32 aer_notice_type = (result & 0xff00) >> 8;

	trace_nvme_async_event(ctrl, aer_notice_type);

	switch (aer_notice_type) {
	case NVME_AER_NOTICE_NS_CHANGED:
		set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
		nvme_queue_scan(ctrl);
		break;
	case NVME_AER_NOTICE_FW_ACT_STARTING:
		/*
		 * We are (ab)using the RESETTING state to prevent subsequent
		 * recovery actions from interfering with the controller's
		 * firmware activation.
		 */
		if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
			queue_work(nvme_wq, &ctrl->fw_act_work);
		break;
#ifdef CONFIG_NVME_MULTIPATH
	case NVME_AER_NOTICE_ANA:
		if (!ctrl->ana_log_buf)
			break;
		queue_work(nvme_wq, &ctrl->ana_work);
		break;
#endif
	case NVME_AER_NOTICE_DISC_CHANGED:
		ctrl->aen_result = result;
		break;
	default:
		dev_warn(ctrl->device, "async event result %08x\n", result);
	}
}

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res)
{
	u32 result = le32_to_cpu(res->u32);
	u32 aer_type = result & 0x07;

	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
		return;

	switch (aer_type) {
	case NVME_AER_NOTICE:
		nvme_handle_aen_notice(ctrl, result);
		break;
	case NVME_AER_ERROR:
	case NVME_AER_SMART:
	case NVME_AER_CSS:
	case NVME_AER_VS:
		trace_nvme_async_event(ctrl, aer_type);
		ctrl->aen_result = result;
		break;
	default:
		break;
	}
	queue_work(nvme_wq, &ctrl->async_event_work);
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);

void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_mpath_stop(ctrl);
	nvme_stop_keep_alive(ctrl);
	nvme_stop_failfast_work(ctrl);
	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fw_act_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_ctrl);

void nvme_start_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_start_keep_alive(ctrl);

	nvme_enable_aen(ctrl);

	if (ctrl->queue_count > 1) {
		nvme_queue_scan(ctrl);
		nvme_start_queues(ctrl);
		nvme_mpath_update(ctrl);
	}

	nvme_change_uevent(ctrl, "NVME_EVENT=connected");
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);

void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_hwmon_exit(ctrl);
	nvme_fault_inject_fini(&ctrl->fault_inject);
	dev_pm_qos_hide_latency_tolerance(ctrl->device);
	cdev_device_del(&ctrl->cdev, ctrl->device);
	nvme_put_ctrl(ctrl);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);

static void nvme_free_cels(struct nvme_ctrl *ctrl)
{
	struct nvme_effects_log	*cel;
	unsigned long i;

	xa_for_each(&ctrl->cels, i, cel) {
		xa_erase(&ctrl->cels, i);
		kfree(cel);
	}

	xa_destroy(&ctrl->cels);
}

static void nvme_free_ctrl(struct device *dev)
{
	struct nvme_ctrl *ctrl =
		container_of(dev, struct nvme_ctrl, ctrl_device);
	struct nvme_subsystem *subsys = ctrl->subsys;

	if (!subsys || ctrl->instance != subsys->instance)
		ida_free(&nvme_instance_ida, ctrl->instance);

	nvme_free_cels(ctrl);
	nvme_mpath_uninit(ctrl);
	__free_page(ctrl->discard_page);

	if (subsys) {
		mutex_lock(&nvme_subsystems_lock);
		list_del(&ctrl->subsys_entry);
		sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
		mutex_unlock(&nvme_subsystems_lock);
	}

	ctrl->ops->free_ctrl(ctrl);

	if (subsys)
		nvme_put_subsystem(subsys);
}

/*
 * Initialize an NVMe controller structure.  This needs to be called during
 * the earliest initialization so that we have the initialized structure
 * around during probing.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

	ctrl->state = NVME_CTRL_NEW;
	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
	spin_lock_init(&ctrl->lock);
	mutex_init(&ctrl->scan_lock);
	INIT_LIST_HEAD(&ctrl->namespaces);
	xa_init(&ctrl->cels);
	init_rwsem(&ctrl->namespaces_rwsem);
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;
	ctrl->numa_node = NUMA_NO_NODE;
	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
	INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
	INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
	init_waitqueue_head(&ctrl->state_wq);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
	INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work);
	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;

	BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
			PAGE_SIZE);
	ctrl->discard_page = alloc_page(GFP_KERNEL);
	if (!ctrl->discard_page) {
		ret = -ENOMEM;
		goto out;
	}

	ret = ida_alloc(&nvme_instance_ida, GFP_KERNEL);
	if (ret < 0)
		goto out;
	ctrl->instance = ret;

	device_initialize(&ctrl->ctrl_device);
	ctrl->device = &ctrl->ctrl_device;
	ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt),
			ctrl->instance);
	ctrl->device->class = nvme_class;
	ctrl->device->parent = ctrl->dev;
	ctrl->device->groups = nvme_dev_attr_groups;
	ctrl->device->release = nvme_free_ctrl;
	dev_set_drvdata(ctrl->device, ctrl);
	ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
	if (ret)
		goto out_release_instance;

	nvme_get_ctrl(ctrl);
	cdev_init(&ctrl->cdev, &nvme_dev_fops);
	ctrl->cdev.owner = ops->module;
	ret = cdev_device_add(&ctrl->cdev, ctrl->device);
	if (ret)
		goto out_free_name;

	/*
	 * Initialize latency tolerance controls.  The sysfs files won't
	 * be visible to userspace unless the device actually supports APST.
	 */
	ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
		min(default_ps_max_latency_us, (unsigned long)S32_MAX));

	nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
	nvme_mpath_init_ctrl(ctrl);

	return 0;
out_free_name:
	nvme_put_ctrl(ctrl);
	kfree_const(ctrl->device->kobj.name);
out_release_instance:
	ida_free(&nvme_instance_ida, ctrl->instance);
out:
	if (ctrl->discard_page)
		__free_page(ctrl->discard_page);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);

static void nvme_start_ns_queue(struct nvme_ns *ns)
{
	if (test_and_clear_bit(NVME_NS_STOPPED, &ns->flags))
		blk_mq_unquiesce_queue(ns->queue);
}

static void nvme_stop_ns_queue(struct nvme_ns *ns)
{
	if (!test_and_set_bit(NVME_NS_STOPPED, &ns->flags))
		blk_mq_quiesce_queue(ns->queue);
	else
		blk_mq_wait_quiesce_done(ns->queue);
}

/*
 * Prepare a queue for teardown.
 *
 * This must forcibly unquiesce queues to avoid blocking dispatch, and only set
 * the capacity to 0 after that to avoid blocking dispatchers that may be
 * holding bd_mutex.  This will end buffered writers dirtying pages that can't
 * be synced.
 */
static void nvme_set_queue_dying(struct nvme_ns *ns)
{
	if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
		return;

	blk_mark_disk_dead(ns->disk);
	nvme_start_ns_queue(ns);

	set_capacity_and_notify(ns->disk, 0);
}

/**
 * nvme_kill_queues(): Ends all namespace queues
 * @ctrl: the dead controller that needs to end
 *
 * Call this function when the driver determines it is unable to get the
 * controller in a state capable of servicing IO.
 */
void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);

	/* Forcibly unquiesce queues to avoid blocking dispatch */
	if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
		nvme_start_admin_queue(ctrl);

	list_for_each_entry(ns, &ctrl->namespaces, list)
		nvme_set_queue_dying(ns);

	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);

void nvme_unfreeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unfreeze_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);

int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
		if (timeout <= 0)
			break;
	}
	up_read(&ctrl->namespaces_rwsem);
	return timeout;
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);

void nvme_wait_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_freeze_queue_wait(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze);

void nvme_start_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_freeze_queue_start(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);

void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		nvme_stop_ns_queue(ns);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);

void nvme_start_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		nvme_start_ns_queue(ns);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);

void nvme_stop_admin_queue(struct nvme_ctrl *ctrl)
{
	if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
		blk_mq_quiesce_queue(ctrl->admin_q);
	else
		blk_mq_wait_quiesce_done(ctrl->admin_q);
}
EXPORT_SYMBOL_GPL(nvme_stop_admin_queue);

void nvme_start_admin_queue(struct nvme_ctrl *ctrl)
{
	if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
		blk_mq_unquiesce_queue(ctrl->admin_q);
}
EXPORT_SYMBOL_GPL(nvme_start_admin_queue);

void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_sync_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_sync_io_queues);

void nvme_sync_queues(struct nvme_ctrl *ctrl)
{
	nvme_sync_io_queues(ctrl);
	if (ctrl->admin_q)
		blk_sync_queue(ctrl->admin_q);
}
EXPORT_SYMBOL_GPL(nvme_sync_queues);

struct nvme_ctrl *nvme_ctrl_from_file(struct file *file)
{
	if (file->f_op != &nvme_dev_fops)
		return NULL;
	return file->private_data;
}
EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, NVME_TARGET_PASSTHRU);

/*
 * Check we didn't inadvertently grow the command structure sizes:
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns_cs_indep) !=
			NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns_nvm) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_feat_host_behavior) != 512);
}


static int __init nvme_core_init(void)
{
	int result = -ENOMEM;

	_nvme_check_size();

	nvme_wq = alloc_workqueue("nvme-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_wq)
		goto out;

	nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_reset_wq)
		goto destroy_wq;

	nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_delete_wq)
		goto destroy_reset_wq;

	result = alloc_chrdev_region(&nvme_ctrl_base_chr_devt, 0,
			NVME_MINORS, "nvme");
	if (result < 0)
		goto destroy_delete_wq;

	nvme_class = class_create(THIS_MODULE, "nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}
	nvme_class->dev_uevent = nvme_class_uevent;

	nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem");
	if (IS_ERR(nvme_subsys_class)) {
		result = PTR_ERR(nvme_subsys_class);
		goto destroy_class;
	}

	result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS,
				     "nvme-generic");
	if (result < 0)
		goto destroy_subsys_class;

	nvme_ns_chr_class = class_create(THIS_MODULE, "nvme-generic");
	if (IS_ERR(nvme_ns_chr_class)) {
		result = PTR_ERR(nvme_ns_chr_class);
		goto unregister_generic_ns;
	}

	return 0;

unregister_generic_ns:
	unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
destroy_subsys_class:
	class_destroy(nvme_subsys_class);
destroy_class:
	class_destroy(nvme_class);
unregister_chrdev:
	unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
destroy_delete_wq:
	destroy_workqueue(nvme_delete_wq);
destroy_reset_wq:
	destroy_workqueue(nvme_reset_wq);
destroy_wq:
	destroy_workqueue(nvme_wq);
out:
	return result;
}

static void __exit nvme_core_exit(void)
{
	class_destroy(nvme_ns_chr_class);
	class_destroy(nvme_subsys_class);
	class_destroy(nvme_class);
	unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
	unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
	destroy_workqueue(nvme_delete_wq);
	destroy_workqueue(nvme_reset_wq);
	destroy_workqueue(nvme_wq);
	ida_destroy(&nvme_ns_chr_minor_ida);
	ida_destroy(&nvme_instance_ida);
}

MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_core_init);
module_exit(nvme_core_exit);