core.c 124.3 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0
2 3 4 5 6 7 8
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
9
#include <linux/blk-integrity.h>
10
#include <linux/compat.h>
11
#include <linux/delay.h>
12
#include <linux/errno.h>
13
#include <linux/hdreg.h>
14
#include <linux/kernel.h>
15
#include <linux/module.h>
16
#include <linux/backing-dev.h>
17 18
#include <linux/slab.h>
#include <linux/types.h>
19 20 21
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
22
#include <linux/pm_qos.h>
23
#include <asm/unaligned.h>
24 25

#include "nvme.h"
S
Sagi Grimberg 已提交
26
#include "fabrics.h"
27

H
Hannes Reinecke 已提交
28 29 30
#define CREATE_TRACE_POINTS
#include "trace.h"

31 32
#define NVME_MINORS		(1U << MINORBITS)

33 34
unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
35
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
36
EXPORT_SYMBOL_GPL(admin_timeout);
37

38 39
unsigned int nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
40
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
41
EXPORT_SYMBOL_GPL(nvme_io_timeout);
42

43
static unsigned char shutdown_timeout = 5;
44 45 46
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

47 48
static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
K
Keith Busch 已提交
49
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
50

51
static unsigned long default_ps_max_latency_us = 100000;
52 53 54 55
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

56 57 58 59
static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79
static unsigned long apst_primary_timeout_ms = 100;
module_param(apst_primary_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(apst_primary_timeout_ms,
	"primary APST timeout in ms");

static unsigned long apst_secondary_timeout_ms = 2000;
module_param(apst_secondary_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(apst_secondary_timeout_ms,
	"secondary APST timeout in ms");

static unsigned long apst_primary_latency_tol_us = 15000;
module_param(apst_primary_latency_tol_us, ulong, 0644);
MODULE_PARM_DESC(apst_primary_latency_tol_us,
	"primary APST latency tolerance in us");

static unsigned long apst_secondary_latency_tol_us = 100000;
module_param(apst_secondary_latency_tol_us, ulong, 0644);
MODULE_PARM_DESC(apst_secondary_latency_tol_us,
	"secondary APST latency tolerance in us");

80 81 82 83 84
/*
 * nvme_wq - hosts nvme related works that are not reset or delete
 * nvme_reset_wq - hosts nvme reset works
 * nvme_delete_wq - hosts nvme delete works
 *
85 86
 * nvme_wq will host works such as scan, aen handling, fw activation,
 * keep-alive, periodic reconnects etc. nvme_reset_wq
87 88 89 90
 * runs reset works which also flush works hosted on nvme_wq for
 * serialization purposes. nvme_delete_wq host controller deletion
 * works which flush reset works for serialization.
 */
91 92 93
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

94 95 96 97 98 99
struct workqueue_struct *nvme_reset_wq;
EXPORT_SYMBOL_GPL(nvme_reset_wq);

struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);

C
Christoph Hellwig 已提交
100 101
static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);
102

103
static DEFINE_IDA(nvme_instance_ida);
104
static dev_t nvme_ctrl_base_chr_devt;
105
static struct class *nvme_class;
C
Christoph Hellwig 已提交
106
static struct class *nvme_subsys_class;
107

108 109 110 111
static DEFINE_IDA(nvme_ns_chr_minor_ida);
static dev_t nvme_ns_chr_devt;
static struct class *nvme_ns_chr_class;

112
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
113 114
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					   unsigned nsid);
115 116
static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
				   struct nvme_command *cmd);
117

118
void nvme_queue_scan(struct nvme_ctrl *ctrl)
119 120 121 122
{
	/*
	 * Only new queue scan work when admin and IO queues are both alive
	 */
K
Keith Busch 已提交
123
	if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
124 125 126
		queue_work(nvme_wq, &ctrl->scan_work);
}

127 128 129 130 131 132
/*
 * Use this function to proceed with scheduling reset_work for a controller
 * that had previously been set to the resetting state. This is intended for
 * code paths that can't be interrupted by other reset attempts. A hot removal
 * may prevent this from succeeding.
 */
133
int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
134 135 136 137 138 139 140
{
	if (ctrl->state != NVME_CTRL_RESETTING)
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
141
EXPORT_SYMBOL_GPL(nvme_try_sched_reset);
142

143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174
static void nvme_failfast_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, failfast_work);

	if (ctrl->state != NVME_CTRL_CONNECTING)
		return;

	set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
	dev_info(ctrl->device, "failfast expired\n");
	nvme_kick_requeue_lists(ctrl);
}

static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1)
		return;

	schedule_delayed_work(&ctrl->failfast_work,
			      ctrl->opts->fast_io_fail_tmo * HZ);
}

static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts)
		return;

	cancel_delayed_work_sync(&ctrl->failfast_work);
	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
}


175 176 177 178
int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
179
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
180 181 182 183 184
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);

185
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
186 187 188 189
{
	int ret;

	ret = nvme_reset_ctrl(ctrl);
190
	if (!ret) {
191
		flush_work(&ctrl->reset_work);
K
Keith Busch 已提交
192
		if (ctrl->state != NVME_CTRL_LIVE)
193 194 195
			ret = -ENETRESET;
	}

196 197 198
	return ret;
}

199
static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
200
{
201
	dev_info(ctrl->device,
202
		 "Removing ctrl: NQN \"%s\"\n", nvmf_ctrl_subsysnqn(ctrl));
203

204
	flush_work(&ctrl->reset_work);
205 206
	nvme_stop_ctrl(ctrl);
	nvme_remove_namespaces(ctrl);
207
	ctrl->ops->delete_ctrl(ctrl);
208
	nvme_uninit_ctrl(ctrl);
209 210
}

211 212 213 214 215 216 217 218
static void nvme_delete_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, delete_work);

	nvme_do_delete_ctrl(ctrl);
}

219 220 221 222
int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		return -EBUSY;
223
	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
224 225 226 227 228
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);

229
static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
230 231
{
	/*
232 233
	 * Keep a reference until nvme_do_delete_ctrl() complete,
	 * since ->delete_ctrl can free the controller.
234 235
	 */
	nvme_get_ctrl(ctrl);
236
	if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
237
		nvme_do_delete_ctrl(ctrl);
238 239 240
	nvme_put_ctrl(ctrl);
}

241
static blk_status_t nvme_error_status(u16 status)
242
{
243
	switch (status & 0x7ff) {
244
	case NVME_SC_SUCCESS:
245
		return BLK_STS_OK;
246
	case NVME_SC_CAP_EXCEEDED:
247
		return BLK_STS_NOSPC;
248
	case NVME_SC_LBA_RANGE:
249 250
	case NVME_SC_CMD_INTERRUPTED:
	case NVME_SC_NS_NOT_READY:
251 252
		return BLK_STS_TARGET;
	case NVME_SC_BAD_ATTRIBUTES:
253
	case NVME_SC_ONCS_NOT_SUPPORTED:
254 255 256
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
257
		return BLK_STS_NOTSUPP;
258 259 260
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
261 262
	case NVME_SC_ACCESS_DENIED:
	case NVME_SC_READ_ONLY:
263
	case NVME_SC_COMPARE_FAILED:
264
		return BLK_STS_MEDIUM;
265 266 267 268 269 270 271
	case NVME_SC_GUARD_CHECK:
	case NVME_SC_APPTAG_CHECK:
	case NVME_SC_REFTAG_CHECK:
	case NVME_SC_INVALID_PI:
		return BLK_STS_PROTECTION;
	case NVME_SC_RESERVATION_CONFLICT:
		return BLK_STS_NEXUS;
272 273
	case NVME_SC_HOST_PATH_ERROR:
		return BLK_STS_TRANSPORT;
K
Keith Busch 已提交
274 275 276 277
	case NVME_SC_ZONE_TOO_MANY_ACTIVE:
		return BLK_STS_ZONE_ACTIVE_RESOURCE;
	case NVME_SC_ZONE_TOO_MANY_OPEN:
		return BLK_STS_ZONE_OPEN_RESOURCE;
278 279
	default:
		return BLK_STS_IOERR;
280 281 282
	}
}

283 284 285 286 287 288 289
static void nvme_retry_req(struct request *req)
{
	unsigned long delay = 0;
	u16 crd;

	/* The mask and shift result must be <= 3 */
	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
290 291
	if (crd)
		delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;
292 293 294 295 296 297

	nvme_req(req)->retries++;
	blk_mq_requeue_request(req, false);
	blk_mq_delay_kick_requeue_list(req->q, delay);
}

A
Alan Adamson 已提交
298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328
static void nvme_log_error(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	struct nvme_request *nr = nvme_req(req);

	if (ns) {
		pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %llu blocks, %s (sct 0x%x / sc 0x%x) %s%s\n",
		       ns->disk ? ns->disk->disk_name : "?",
		       nvme_get_opcode_str(nr->cmd->common.opcode),
		       nr->cmd->common.opcode,
		       (unsigned long long)nvme_sect_to_lba(ns, blk_rq_pos(req)),
		       (unsigned long long)blk_rq_bytes(req) >> ns->lba_shift,
		       nvme_get_error_status_str(nr->status),
		       nr->status >> 8 & 7,	/* Status Code Type */
		       nr->status & 0xff,	/* Status Code */
		       nr->status & NVME_SC_MORE ? "MORE " : "",
		       nr->status & NVME_SC_DNR  ? "DNR "  : "");
		return;
	}

	pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s\n",
			   dev_name(nr->ctrl->device),
			   nvme_get_admin_opcode_str(nr->cmd->common.opcode),
			   nr->cmd->common.opcode,
			   nvme_get_error_status_str(nr->status),
			   nr->status >> 8 & 7,	/* Status Code Type */
			   nr->status & 0xff,	/* Status Code */
			   nr->status & NVME_SC_MORE ? "MORE " : "",
			   nr->status & NVME_SC_DNR  ? "DNR "  : "");
}

329 330 331 332 333 334 335
enum nvme_disposition {
	COMPLETE,
	RETRY,
	FAILOVER,
};

static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
336
{
337 338
	if (likely(nvme_req(req)->status == 0))
		return COMPLETE;
339

340 341 342 343
	if (blk_noretry_request(req) ||
	    (nvme_req(req)->status & NVME_SC_DNR) ||
	    nvme_req(req)->retries >= nvme_max_retries)
		return COMPLETE;
344

345
	if (req->cmd_flags & REQ_NVME_MPATH) {
346 347
		if (nvme_is_path_error(nvme_req(req)->status) ||
		    blk_queue_dying(req->q))
348
			return FAILOVER;
349 350 351
	} else {
		if (blk_queue_dying(req->q))
			return COMPLETE;
352
	}
353

354 355
	return RETRY;
}
356

357
static inline void nvme_end_req_zoned(struct request *req)
358 359 360
{
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    req_op(req) == REQ_OP_ZONE_APPEND)
K
Keith Busch 已提交
361 362
		req->__sector = nvme_lba_to_sect(req->q->queuedata,
			le64_to_cpu(nvme_req(req)->result.u64));
363 364 365 366 367
}

static inline void nvme_end_req(struct request *req)
{
	blk_status_t status = nvme_error_status(nvme_req(req)->status);
H
Hannes Reinecke 已提交
368

A
Alan Adamson 已提交
369 370
	if (unlikely(nvme_req(req)->status != NVME_SC_SUCCESS))
		nvme_log_error(req);
371
	nvme_end_req_zoned(req);
372
	nvme_trace_bio_complete(req);
373
	blk_mq_end_request(req, status);
374
}
375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395

void nvme_complete_rq(struct request *req)
{
	trace_nvme_complete_rq(req);
	nvme_cleanup_cmd(req);

	if (nvme_req(req)->ctrl->kas)
		nvme_req(req)->ctrl->comp_seen = true;

	switch (nvme_decide_disposition(req)) {
	case COMPLETE:
		nvme_end_req(req);
		return;
	case RETRY:
		nvme_retry_req(req);
		return;
	case FAILOVER:
		nvme_failover_req(req);
		return;
	}
}
396 397
EXPORT_SYMBOL_GPL(nvme_complete_rq);

398 399
void nvme_complete_batch_req(struct request *req)
{
400
	trace_nvme_complete_rq(req);
401 402 403 404 405
	nvme_cleanup_cmd(req);
	nvme_end_req_zoned(req);
}
EXPORT_SYMBOL_GPL(nvme_complete_batch_req);

406 407 408 409 410 411 412 413 414 415 416 417 418 419 420
/*
 * Called to unwind from ->queue_rq on a failed command submission so that the
 * multipathing code gets called to potentially failover to another path.
 * The caller needs to unwind all transport specific resource allocations and
 * must return propagate the return value.
 */
blk_status_t nvme_host_path_error(struct request *req)
{
	nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
	blk_mq_set_request_complete(req);
	nvme_complete_rq(req);
	return BLK_STS_OK;
}
EXPORT_SYMBOL_GPL(nvme_host_path_error);

421
bool nvme_cancel_request(struct request *req, void *data, bool reserved)
422 423 424 425
{
	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

426 427 428 429
	/* don't abort one completed request */
	if (blk_mq_request_completed(req))
		return true;

430
	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
431
	nvme_req(req)->flags |= NVME_REQ_CANCELLED;
432
	blk_mq_complete_request(req);
433
	return true;
434 435 436
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);

C
Chao Leng 已提交
437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456
void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->tagset) {
		blk_mq_tagset_busy_iter(ctrl->tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_tagset);

void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->admin_tagset) {
		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);

457 458 459
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
460
	enum nvme_ctrl_state old_state;
461
	unsigned long flags;
462 463
	bool changed = false;

464
	spin_lock_irqsave(&ctrl->lock, flags);
465 466

	old_state = ctrl->state;
467 468 469
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
470
		case NVME_CTRL_NEW:
471
		case NVME_CTRL_RESETTING:
472
		case NVME_CTRL_CONNECTING:
473
			changed = true;
474
			fallthrough;
475 476 477 478 479 480 481
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
482 483
		case NVME_CTRL_LIVE:
			changed = true;
484
			fallthrough;
485 486 487 488
		default:
			break;
		}
		break;
489
	case NVME_CTRL_CONNECTING:
490
		switch (old_state) {
491
		case NVME_CTRL_NEW:
492
		case NVME_CTRL_RESETTING:
493
			changed = true;
494
			fallthrough;
495 496 497 498 499 500 501 502
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
503
		case NVME_CTRL_CONNECTING:
504
			changed = true;
505
			fallthrough;
506 507 508 509
		default:
			break;
		}
		break;
510 511 512 513 514
	case NVME_CTRL_DELETING_NOIO:
		switch (old_state) {
		case NVME_CTRL_DELETING:
		case NVME_CTRL_DEAD:
			changed = true;
515
			fallthrough;
516 517 518 519
		default:
			break;
		}
		break;
520 521 522 523
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
524
			fallthrough;
525 526 527 528
		default:
			break;
		}
		break;
529 530 531 532
	default:
		break;
	}

533
	if (changed) {
534
		ctrl->state = new_state;
535 536
		wake_up_all(&ctrl->state_wq);
	}
537

538
	spin_unlock_irqrestore(&ctrl->lock, flags);
539 540 541 542 543 544
	if (!changed)
		return false;

	if (ctrl->state == NVME_CTRL_LIVE) {
		if (old_state == NVME_CTRL_CONNECTING)
			nvme_stop_failfast_work(ctrl);
545
		nvme_kick_requeue_lists(ctrl);
546 547 548 549
	} else if (ctrl->state == NVME_CTRL_CONNECTING &&
		old_state == NVME_CTRL_RESETTING) {
		nvme_start_failfast_work(ctrl);
	}
550 551 552 553
	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);

554 555 556 557 558 559 560 561 562 563 564 565
/*
 * Returns true for sink states that can't ever transition back to live.
 */
static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
{
	switch (ctrl->state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	case NVME_CTRL_DELETING:
566
	case NVME_CTRL_DELETING_NOIO:
567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587
	case NVME_CTRL_DEAD:
		return true;
	default:
		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
		return true;
	}
}

/*
 * Waits for the controller state to be resetting, or returns false if it is
 * not possible to ever transition to that state.
 */
bool nvme_wait_reset(struct nvme_ctrl *ctrl)
{
	wait_event(ctrl->state_wq,
		   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
		   nvme_state_terminal(ctrl));
	return ctrl->state == NVME_CTRL_RESETTING;
}
EXPORT_SYMBOL_GPL(nvme_wait_reset);

C
Christoph Hellwig 已提交
588 589 590 591 592
static void nvme_free_ns_head(struct kref *ref)
{
	struct nvme_ns_head *head =
		container_of(ref, struct nvme_ns_head, ref);

593
	nvme_mpath_remove_disk(head);
594
	ida_free(&head->subsys->ns_ida, head->instance);
595
	cleanup_srcu_struct(&head->srcu);
596
	nvme_put_subsystem(head->subsys);
C
Christoph Hellwig 已提交
597 598 599
	kfree(head);
}

600
bool nvme_tryget_ns_head(struct nvme_ns_head *head)
601 602 603 604
{
	return kref_get_unless_zero(&head->ref);
}

605
void nvme_put_ns_head(struct nvme_ns_head *head)
C
Christoph Hellwig 已提交
606 607 608 609
{
	kref_put(&head->ref, nvme_free_ns_head);
}

610 611 612 613 614
static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	put_disk(ns->disk);
C
Christoph Hellwig 已提交
615
	nvme_put_ns_head(ns->head);
616
	nvme_put_ctrl(ns->ctrl);
617 618 619
	kfree(ns);
}

K
Kanchan Joshi 已提交
620 621 622 623 624
static inline bool nvme_get_ns(struct nvme_ns *ns)
{
	return kref_get_unless_zero(&ns->kref);
}

625
void nvme_put_ns(struct nvme_ns *ns)
626 627 628
{
	kref_put(&ns->kref, nvme_free_ns);
}
629
EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);
630

631 632
static inline void nvme_clear_nvme_request(struct request *req)
{
633
	nvme_req(req)->status = 0;
634 635 636
	nvme_req(req)->retries = 0;
	nvme_req(req)->flags = 0;
	req->rq_flags |= RQF_DONTPREP;
637 638
}

639
static inline unsigned int nvme_req_op(struct nvme_command *cmd)
640
{
641 642
	return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
}
643

644 645 646
static inline void nvme_init_request(struct request *req,
		struct nvme_command *cmd)
{
647 648 649
	if (req->q->queuedata)
		req->timeout = NVME_IO_TIMEOUT;
	else /* no queuedata implies admin queue */
650
		req->timeout = NVME_ADMIN_TIMEOUT;
651

652 653 654
	/* passthru commands should let the driver set the SGL flags */
	cmd->common.flags &= ~NVME_CMD_SGL_ALL;

655
	req->cmd_flags |= REQ_FAILFAST_DRIVER;
656
	if (req->mq_hctx->type == HCTX_TYPE_POLL)
657
		req->cmd_flags |= REQ_POLLED;
658
	nvme_clear_nvme_request(req);
659
	memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
660
}
661

662 663 664 665 666 667 668 669
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags)
{
	struct request *req;

	req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
	if (!IS_ERR(req))
		nvme_init_request(req, cmd);
670 671
	return req;
}
672
EXPORT_SYMBOL_GPL(nvme_alloc_request);
673

674
static struct request *nvme_alloc_request_qid(struct request_queue *q,
675 676 677 678 679 680 681 682 683 684 685
		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
	struct request *req;

	req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
			qid ? qid - 1 : 0);
	if (!IS_ERR(req))
		nvme_init_request(req, cmd);
	return req;
}

686 687 688 689 690 691 692 693 694 695 696 697 698
/*
 * For something we're not in a state to send to the device the default action
 * is to busy it and retry it after the controller state is recovered.  However,
 * if the controller is deleting or if anything is marked for failfast or
 * nvme multipath it is immediately failed.
 *
 * Note: commands used to initialize the controller will be marked for failfast.
 * Note: nvme cli/ioctl commands are marked for failfast.
 */
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *rq)
{
	if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
699
	    ctrl->state != NVME_CTRL_DELETING &&
700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746
	    ctrl->state != NVME_CTRL_DEAD &&
	    !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
		return BLK_STS_RESOURCE;
	return nvme_host_path_error(rq);
}
EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);

bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	struct nvme_request *req = nvme_req(rq);

	/*
	 * currently we have a problem sending passthru commands
	 * on the admin_q if the controller is not LIVE because we can't
	 * make sure that they are going out after the admin connect,
	 * controller enable and/or other commands in the initialization
	 * sequence. until the controller will be LIVE, fail with
	 * BLK_STS_RESOURCE so that they will be rescheduled.
	 */
	if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
		return false;

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		/*
		 * Only allow commands on a live queue, except for the connect
		 * command, which is require to set the queue live in the
		 * appropinquate states.
		 */
		switch (ctrl->state) {
		case NVME_CTRL_CONNECTING:
			if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
			    req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
				return true;
			break;
		default:
			break;
		case NVME_CTRL_DEAD:
			return false;
		}
	}

	return queue_live;
}
EXPORT_SYMBOL_GPL(__nvme_check_ready);

M
Ming Lin 已提交
747 748 749
static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
750
	memset(cmnd, 0, sizeof(*cmnd));
M
Ming Lin 已提交
751
	cmnd->common.opcode = nvme_cmd_flush;
C
Christoph Hellwig 已提交
752
	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
M
Ming Lin 已提交
753 754
}

755
static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
M
Ming Lin 已提交
756 757
		struct nvme_command *cmnd)
{
758
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
M
Ming Lin 已提交
759
	struct nvme_dsm_range *range;
760
	struct bio *bio;
M
Ming Lin 已提交
761

762 763 764 765 766 767 768 769
	/*
	 * Some devices do not consider the DSM 'Number of Ranges' field when
	 * determining how much data to DMA. Always allocate memory for maximum
	 * number of segments to prevent device reading beyond end of buffer.
	 */
	static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;

	range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
770 771 772 773 774 775 776 777 778 779 780
	if (!range) {
		/*
		 * If we fail allocation our range, fallback to the controller
		 * discard page. If that's also busy, it's safe to return
		 * busy, as we know we can make progress once that's freed.
		 */
		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
			return BLK_STS_RESOURCE;

		range = page_address(ns->ctrl->discard_page);
	}
M
Ming Lin 已提交
781

782
	__rq_for_each_bio(bio, req) {
783
		u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
784 785
		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

K
Keith Busch 已提交
786 787 788 789 790
		if (n < segments) {
			range[n].cattr = cpu_to_le32(0);
			range[n].nlb = cpu_to_le32(nlb);
			range[n].slba = cpu_to_le64(slba);
		}
791 792 793 794
		n++;
	}

	if (WARN_ON_ONCE(n != segments)) {
795 796 797 798
		if (virt_to_page(range) == ns->ctrl->discard_page)
			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
		else
			kfree(range);
799
		return BLK_STS_IOERR;
800
	}
M
Ming Lin 已提交
801

802
	memset(cmnd, 0, sizeof(*cmnd));
M
Ming Lin 已提交
803
	cmnd->dsm.opcode = nvme_cmd_dsm;
C
Christoph Hellwig 已提交
804
	cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
805
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
M
Ming Lin 已提交
806 807
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

808 809
	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
810
	req->special_vec.bv_len = alloc_size;
811
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;
M
Ming Lin 已提交
812

813
	return BLK_STS_OK;
M
Ming Lin 已提交
814 815
}

816 817 818
static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
819 820
	memset(cmnd, 0, sizeof(*cmnd));

821 822 823 824 825 826
	if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		return nvme_setup_discard(ns, req, cmnd);

	cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
	cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->write_zeroes.slba =
827
		cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
828 829
	cmnd->write_zeroes.length =
		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
K
Klaus Jensen 已提交
830 831

	if (nvme_ns_has_pi(ns)) {
832
		cmnd->write_zeroes.control = cpu_to_le16(NVME_RW_PRINFO_PRACT);
K
Klaus Jensen 已提交
833 834 835 836 837 838 839 840 841 842

		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			cmnd->write_zeroes.reftag =
				cpu_to_le32(t10_pi_ref_tag(req));
			break;
		}
	}

843 844 845
	return BLK_STS_OK;
}

846
static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
K
Keith Busch 已提交
847 848
		struct request *req, struct nvme_command *cmnd,
		enum nvme_opcode op)
M
Ming Lin 已提交
849 850 851 852 853 854 855 856 857 858 859 860
{
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

K
Keith Busch 已提交
861
	cmnd->rw.opcode = op;
862
	cmnd->rw.flags = 0;
C
Christoph Hellwig 已提交
863
	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
864 865
	cmnd->rw.rsvd2 = 0;
	cmnd->rw.metadata = 0;
866
	cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
M
Ming Lin 已提交
867
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
868 869 870
	cmnd->rw.reftag = 0;
	cmnd->rw.apptag = 0;
	cmnd->rw.appmask = 0;
M
Ming Lin 已提交
871 872

	if (ns->ms) {
873 874 875 876 877 878 879 880 881 882 883 884
		/*
		 * If formated with metadata, the block layer always provides a
		 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.  Else
		 * we enable the PRACT bit for protection information or set the
		 * namespace capacity to zero to prevent any I/O.
		 */
		if (!blk_integrity_rq(req)) {
			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
				return BLK_STS_NOTSUPP;
			control |= NVME_RW_PRINFO_PRACT;
		}

M
Ming Lin 已提交
885 886 887 888 889 890 891 892
		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
K
Keith Busch 已提交
893 894
			if (op == nvme_cmd_zone_append)
				control |= NVME_RW_APPEND_PIREMAP;
895
			cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
M
Ming Lin 已提交
896 897 898 899 900 901
			break;
		}
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
902
	return 0;
M
Ming Lin 已提交
903 904
}

905 906 907
void nvme_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
M
Minwoo Im 已提交
908
		struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
909

C
Christoph Hellwig 已提交
910
		if (req->special_vec.bv_page == ctrl->discard_page)
M
Minwoo Im 已提交
911
			clear_bit_unlock(0, &ctrl->discard_page_busy);
912
		else
C
Christoph Hellwig 已提交
913
			kfree(bvec_virt(&req->special_vec));
914 915 916 917
	}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);

918
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
M
Ming Lin 已提交
919
{
920
	struct nvme_command *cmd = nvme_req(req)->cmd;
921
	blk_status_t ret = BLK_STS_OK;
M
Ming Lin 已提交
922

923
	if (!(req->rq_flags & RQF_DONTPREP))
924
		nvme_clear_nvme_request(req);
925

926 927 928
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
929
		/* these are setup prior to execution in nvme_init_request() */
930 931
		break;
	case REQ_OP_FLUSH:
M
Ming Lin 已提交
932
		nvme_setup_flush(ns, cmd);
933
		break;
K
Keith Busch 已提交
934 935 936 937 938 939 940 941 942 943 944 945 946
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_RESET:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
		break;
947
	case REQ_OP_WRITE_ZEROES:
948 949
		ret = nvme_setup_write_zeroes(ns, req, cmd);
		break;
950
	case REQ_OP_DISCARD:
M
Ming Lin 已提交
951
		ret = nvme_setup_discard(ns, req, cmd);
952 953
		break;
	case REQ_OP_READ:
K
Keith Busch 已提交
954 955
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
		break;
956
	case REQ_OP_WRITE:
K
Keith Busch 已提交
957 958 959 960
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write);
		break;
	case REQ_OP_ZONE_APPEND:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
961 962 963
		break;
	default:
		WARN_ON_ONCE(1);
964
		return BLK_STS_IOERR;
965
	}
M
Ming Lin 已提交
966

967
	cmd->common.command_id = nvme_cid(req);
K
Keith Busch 已提交
968
	trace_nvme_setup_cmd(req, cmd);
M
Ming Lin 已提交
969 970 971 972
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

973 974 975 976 977 978
/*
 * Return values:
 * 0:  success
 * >0: nvme controller's cqe status response
 * <0: kernel error in lieu of controller response
 */
979
static int nvme_execute_rq(struct request *rq, bool at_head)
980 981 982
{
	blk_status_t status;

983
	status = blk_execute_rq(rq, at_head);
984 985 986 987 988 989 990
	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		return -EINTR;
	if (nvme_req(rq)->status)
		return nvme_req(rq)->status;
	return blk_status_to_errno(status);
}

991 992 993 994 995
/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
996
		union nvme_result *result, void *buffer, unsigned bufflen,
997
		unsigned timeout, int qid, int at_head,
998
		blk_mq_req_flags_t flags)
999 1000 1001 1002
{
	struct request *req;
	int ret;

1003 1004 1005 1006
	if (qid == NVME_QID_ANY)
		req = nvme_alloc_request(q, cmd, flags);
	else
		req = nvme_alloc_request_qid(q, cmd, flags, qid);
1007 1008 1009
	if (IS_ERR(req))
		return PTR_ERR(req);

1010 1011
	if (timeout)
		req->timeout = timeout;
1012

1013 1014 1015 1016
	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
1017 1018
	}

1019
	ret = nvme_execute_rq(req, at_head);
1020
	if (result && ret >= 0)
1021
		*result = nvme_req(req)->result;
1022 1023 1024 1025
 out:
	blk_mq_free_request(req);
	return ret;
}
1026
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
1027 1028 1029 1030

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
1031
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
1032
			NVME_QID_ANY, 0, 0);
1033
}
1034
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
1035

1036 1037 1038 1039
static u32 nvme_known_admin_effects(u8 opcode)
{
	switch (opcode) {
	case nvme_admin_format_nvm:
1040
		return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_NCC |
1041 1042
			NVME_CMD_EFFECTS_CSE_MASK;
	case nvme_admin_sanitize_nvm:
1043
		return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK;
1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057
	default:
		break;
	}
	return 0;
}

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
{
	u32 effects = 0;

	if (ns) {
		if (ns->head->effects)
			effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
1058 1059 1060
			dev_warn_once(ctrl->device,
				"IO command:%02x has unhandled effects:%08x\n",
				opcode, effects);
1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080
		return 0;
	}

	if (ctrl->effects)
		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
	effects |= nvme_known_admin_effects(opcode);

	return effects;
}
EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU);

static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			       u8 opcode)
{
	u32 effects = nvme_command_effects(ctrl, ns, opcode);

	/*
	 * For simplicity, IO to all namespaces is quiesced even if the command
	 * effects say only one namespace is affected.
	 */
1081
	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
1082 1083 1084 1085 1086 1087 1088 1089 1090 1091
		mutex_lock(&ctrl->scan_lock);
		mutex_lock(&ctrl->subsys->lock);
		nvme_mpath_start_freeze(ctrl->subsys);
		nvme_mpath_wait_freeze(ctrl->subsys);
		nvme_start_freeze(ctrl);
		nvme_wait_freeze(ctrl);
	}
	return effects;
}

1092 1093
static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
			      struct nvme_command *cmd, int status)
1094
{
1095
	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
1096 1097 1098 1099 1100 1101 1102
		nvme_unfreeze(ctrl);
		nvme_mpath_unfreeze(ctrl->subsys);
		mutex_unlock(&ctrl->subsys->lock);
		nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
		mutex_unlock(&ctrl->scan_lock);
	}
	if (effects & NVME_CMD_EFFECTS_CCC)
1103
		nvme_init_ctrl_finish(ctrl);
1104 1105 1106 1107
	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
		nvme_queue_scan(ctrl);
		flush_work(&ctrl->scan_work);
	}
1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127

	switch (cmd->common.opcode) {
	case nvme_admin_set_features:
		switch (le32_to_cpu(cmd->common.cdw10) & 0xFF) {
		case NVME_FEAT_KATO:
			/*
			 * Keep alive commands interval on the host should be
			 * updated when KATO is modified by Set Features
			 * commands.
			 */
			if (!status)
				nvme_update_keep_alive(ctrl, cmd);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
1128 1129
}

1130
int nvme_execute_passthru_rq(struct request *rq)
1131 1132 1133 1134 1135
{
	struct nvme_command *cmd = nvme_req(rq)->cmd;
	struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
	struct nvme_ns *ns = rq->q->queuedata;
	u32 effects;
1136
	int  ret;
1137 1138

	effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
1139
	ret = nvme_execute_rq(rq, false);
1140
	if (effects) /* nothing to be done for zero cmd effects */
1141
		nvme_passthru_end(ctrl, effects, cmd, ret);
1142 1143

	return ret;
1144 1145 1146
}
EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);

H
Hannes Reinecke 已提交
1147 1148 1149 1150 1151 1152 1153
/*
 * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1:
 * 
 *   The host should send Keep Alive commands at half of the Keep Alive Timeout
 *   accounting for transport roundtrip times [..].
 */
static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
1154
{
H
Hannes Reinecke 已提交
1155
	queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2);
1156 1157
}

1158
static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
S
Sagi Grimberg 已提交
1159 1160
{
	struct nvme_ctrl *ctrl = rq->end_io_data;
1161 1162
	unsigned long flags;
	bool startka = false;
S
Sagi Grimberg 已提交
1163 1164 1165

	blk_mq_free_request(rq);

1166
	if (status) {
S
Sagi Grimberg 已提交
1167
		dev_err(ctrl->device,
1168 1169
			"failed nvme_keep_alive_end_io error=%d\n",
				status);
S
Sagi Grimberg 已提交
1170 1171 1172
		return;
	}

1173
	ctrl->comp_seen = false;
1174 1175 1176 1177 1178 1179
	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->state == NVME_CTRL_LIVE ||
	    ctrl->state == NVME_CTRL_CONNECTING)
		startka = true;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (startka)
H
Hannes Reinecke 已提交
1180
		nvme_queue_keep_alive_work(ctrl);
S
Sagi Grimberg 已提交
1181 1182 1183 1184 1185 1186
}

static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);
1187
	bool comp_seen = ctrl->comp_seen;
1188
	struct request *rq;
1189 1190 1191 1192 1193

	if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
		dev_dbg(ctrl->device,
			"reschedule traffic based keep-alive timer\n");
		ctrl->comp_seen = false;
H
Hannes Reinecke 已提交
1194
		nvme_queue_keep_alive_work(ctrl);
1195 1196
		return;
	}
S
Sagi Grimberg 已提交
1197

1198
	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd,
1199
				BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
1200
	if (IS_ERR(rq)) {
S
Sagi Grimberg 已提交
1201
		/* allocation failure, reset the controller */
1202
		dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
1203
		nvme_reset_ctrl(ctrl);
S
Sagi Grimberg 已提交
1204 1205
		return;
	}
1206 1207 1208

	rq->timeout = ctrl->kato * HZ;
	rq->end_io_data = ctrl;
1209
	blk_execute_rq_nowait(rq, false, nvme_keep_alive_end_io);
S
Sagi Grimberg 已提交
1210 1211
}

1212
static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
S
Sagi Grimberg 已提交
1213 1214 1215 1216
{
	if (unlikely(ctrl->kato == 0))
		return;

H
Hannes Reinecke 已提交
1217
	nvme_queue_keep_alive_work(ctrl);
S
Sagi Grimberg 已提交
1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228
}

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);

1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243
static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
				   struct nvme_command *cmd)
{
	unsigned int new_kato =
		DIV_ROUND_UP(le32_to_cpu(cmd->common.cdw11), 1000);

	dev_info(ctrl->device,
		 "keep alive interval updated from %u ms to %u ms\n",
		 ctrl->kato * 1000 / 2, new_kato * 1000 / 2);

	nvme_stop_keep_alive(ctrl);
	ctrl->kato = new_kato;
	nvme_start_keep_alive(ctrl);
}

1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256
/*
 * In NVMe 1.0 the CNS field was just a binary controller or namespace
 * flag, thus sending any new CNS opcodes has a big chance of not working.
 * Qemu unfortunately had that bug after reporting a 1.1 version compliance
 * (but not for any later version).
 */
static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
{
	if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
		return ctrl->vs < NVME_VS(1, 2, 0);
	return ctrl->vs < NVME_VS(1, 1, 0);
}

K
Keith Busch 已提交
1257
static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
1258 1259 1260 1261 1262 1263
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
1264
	c.identify.cns = NVME_ID_CNS_CTRL;
1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}

1277
static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
1278
		struct nvme_ns_id_desc *cur, bool *csi_seen)
1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307
{
	const char *warn_str = "ctrl returned bogus length:";
	void *data = cur;

	switch (cur->nidt) {
	case NVME_NIDT_EUI64:
		if (cur->nidl != NVME_NIDT_EUI64_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
				 warn_str, cur->nidl);
			return -1;
		}
		memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
		return NVME_NIDT_EUI64_LEN;
	case NVME_NIDT_NGUID:
		if (cur->nidl != NVME_NIDT_NGUID_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
				 warn_str, cur->nidl);
			return -1;
		}
		memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
		return NVME_NIDT_NGUID_LEN;
	case NVME_NIDT_UUID:
		if (cur->nidl != NVME_NIDT_UUID_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
				 warn_str, cur->nidl);
			return -1;
		}
		uuid_copy(&ids->uuid, data + sizeof(*cur));
		return NVME_NIDT_UUID_LEN;
1308 1309 1310 1311 1312 1313 1314 1315 1316
	case NVME_NIDT_CSI:
		if (cur->nidl != NVME_NIDT_CSI_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n",
				 warn_str, cur->nidl);
			return -1;
		}
		memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN);
		*csi_seen = true;
		return NVME_NIDT_CSI_LEN;
1317 1318 1319 1320 1321 1322
	default:
		/* Skip unknown types */
		return cur->nidl;
	}
}

1323
static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
1324
		struct nvme_ns_ids *ids)
1325 1326
{
	struct nvme_command c = { };
1327 1328
	bool csi_seen = false;
	int status, pos, len;
1329 1330
	void *data;

1331 1332
	if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl))
		return 0;
1333 1334 1335
	if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
		return 0;

1336 1337 1338 1339 1340 1341 1342 1343
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

1344
	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
1345
				      NVME_IDENTIFY_DATA_SIZE);
1346 1347
	if (status) {
		dev_warn(ctrl->device,
1348 1349
			"Identify Descriptors failed (nsid=%u, status=0x%x)\n",
			nsid, status);
1350
		goto free_data;
1351
	}
1352 1353 1354 1355 1356 1357 1358

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;

1359
		len = nvme_process_ns_desc(ctrl, ids, cur, &csi_seen);
1360
		if (len < 0)
1361
			break;
1362 1363 1364

		len += sizeof(*cur);
	}
1365 1366 1367 1368 1369 1370 1371

	if (nvme_multi_css(ctrl) && !csi_seen) {
		dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
			 nsid);
		status = -EINVAL;
	}

1372 1373 1374 1375 1376
free_data:
	kfree(data);
	return status;
}

1377 1378
static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
			struct nvme_ns_ids *ids, struct nvme_id_ns **id)
1379 1380 1381 1382 1383
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1384 1385
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
1386
	c.identify.cns = NVME_ID_CNS_NS;
1387

1388 1389 1390
	*id = kmalloc(sizeof(**id), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;
1391

1392
	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
1393
	if (error) {
1394
		dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
1395
		goto out_free_id;
1396 1397
	}

1398
	error = NVME_SC_INVALID_NS | NVME_SC_DNR;
1399 1400
	if ((*id)->ncap == 0) /* namespace not allocated or attached */
		goto out_free_id;
1401 1402 1403 1404 1405 1406 1407 1408

	if (ctrl->vs >= NVME_VS(1, 1, 0) &&
	    !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
		memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
	if (ctrl->vs >= NVME_VS(1, 2, 0) &&
	    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
		memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));

1409 1410 1411 1412
	return 0;

out_free_id:
	kfree(*id);
1413
	return error;
1414 1415
}

K
Keith Busch 已提交
1416 1417
static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen, u32 *result)
1418
{
1419
	union nvme_result res = { 0 };
1420
	struct nvme_command c = { };
1421
	int ret;
1422

K
Keith Busch 已提交
1423
	c.features.opcode = op;
1424 1425 1426
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

1427
	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
1428
			buffer, buflen, 0, NVME_QID_ANY, 0, 0);
1429
	if (ret >= 0 && result)
1430
		*result = le32_to_cpu(res.u32);
1431
	return ret;
1432 1433
}

K
Keith Busch 已提交
1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_set_features);

int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_get_features);

C
Christoph Hellwig 已提交
1452 1453 1454 1455 1456 1457
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

1458
	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
C
Christoph Hellwig 已提交
1459
			&result);
1460
	if (status < 0)
C
Christoph Hellwig 已提交
1461 1462
		return status;

1463 1464 1465 1466 1467 1468
	/*
	 * Degraded controllers might return an error when setting the queue
	 * count.  We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be only way to fix them up.
	 */
	if (status > 0) {
1469
		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
1470 1471 1472 1473 1474 1475
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

C
Christoph Hellwig 已提交
1476 1477
	return 0;
}
1478
EXPORT_SYMBOL_GPL(nvme_set_queue_count);
C
Christoph Hellwig 已提交
1479

1480
#define NVME_AEN_SUPPORTED \
1481 1482
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
	 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)
1483 1484 1485

static void nvme_enable_aen(struct nvme_ctrl *ctrl)
{
1486
	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
1487 1488
	int status;

1489 1490 1491 1492 1493
	if (!supported_aens)
		return;

	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
			NULL, 0, &result);
1494 1495
	if (status)
		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
1496
			 supported_aens);
1497 1498

	queue_work(nvme_wq, &ctrl->async_event_work);
1499 1500
}

1501
static int nvme_ns_open(struct nvme_ns *ns)
1502 1503
{

1504
	/* should never be called due to GENHD_FL_HIDDEN */
1505
	if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head)))
1506
		goto fail;
K
Kanchan Joshi 已提交
1507
	if (!nvme_get_ns(ns))
1508 1509 1510 1511
		goto fail;
	if (!try_module_get(ns->ctrl->ops->module))
		goto fail_put_ns;

C
Christoph Hellwig 已提交
1512
	return 0;
1513 1514 1515 1516 1517

fail_put_ns:
	nvme_put_ns(ns);
fail:
	return -ENXIO;
1518 1519
}

1520
static void nvme_ns_release(struct nvme_ns *ns)
1521
{
1522 1523 1524

	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
1525 1526
}

1527 1528 1529 1530 1531 1532 1533 1534 1535 1536
static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	return nvme_ns_open(bdev->bd_disk->private_data);
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	nvme_ns_release(disk->private_data);
}

1537
int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1538 1539 1540 1541 1542 1543 1544 1545 1546
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
1547 1548
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
				u32 max_integrity_segments)
1549
{
1550
	struct blk_integrity integrity = { };
1551

1552
	switch (pi_type) {
1553 1554
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
1555 1556
		integrity.tag_size = sizeof(u16) + sizeof(u32);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1557 1558 1559 1560
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
1561 1562
		integrity.tag_size = sizeof(u16);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1563 1564 1565 1566 1567
		break;
	default:
		integrity.profile = NULL;
		break;
	}
1568 1569
	integrity.tuple_size = ms;
	blk_integrity_register(disk, &integrity);
1570
	blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
1571 1572
}
#else
1573 1574
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
				u32 max_integrity_segments)
1575 1576 1577 1578
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

1579
static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
1580
{
1581
	struct nvme_ctrl *ctrl = ns->ctrl;
1582
	struct request_queue *queue = disk->queue;
1583 1584
	u32 size = queue_logical_block_size(queue);

1585
	if (ctrl->max_discard_sectors == 0) {
1586 1587 1588 1589
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
		return;
	}

1590 1591 1592
	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
			NVME_DSM_MAX_RANGES);

1593
	queue->limits.discard_alignment = 0;
1594
	queue->limits.discard_granularity = size;
1595

1596 1597 1598 1599
	/* If discard is already enabled, don't reset queue limits */
	if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
		return;

1600 1601
	blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
	blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);
1602 1603

	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
1604
		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
1605 1606
}

1607 1608 1609 1610
static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{
	return uuid_equal(&a->uuid, &b->uuid) &&
		memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
1611 1612
		memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 &&
		a->csi == b->csi;
1613 1614
}

1615
static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630
{
	struct nvme_ctrl *ctrl = ns->ctrl;

	/*
	 * The PI implementation requires the metadata size to be equal to the
	 * t10 pi tuple size.
	 */
	ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
	if (ns->ms == sizeof(struct t10_pi_tuple))
		ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
	else
		ns->pi_type = 0;

	ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
	if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
1631 1632
		return;

1633 1634 1635 1636 1637 1638 1639
	if (ctrl->ops->flags & NVME_F_FABRICS) {
		/*
		 * The NVMe over Fabrics specification only supports metadata as
		 * part of the extended data LBA.  We rely on HCA/HBA support to
		 * remap the separate metadata buffer from the block layer.
		 */
		if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
1640
			return;
1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654

		ns->features |= NVME_NS_EXT_LBAS;

		/*
		 * The current fabrics transport drivers support namespace
		 * metadata formats only if nvme_ns_has_pi() returns true.
		 * Suppress support for all other formats so the namespace will
		 * have a 0 capacity and not be usable through the block stack.
		 *
		 * Note, this check will need to be modified if any drivers
		 * gain the ability to use other metadata formats.
		 */
		if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns))
			ns->features |= NVME_NS_METADATA_SUPPORTED;
1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668
	} else {
		/*
		 * For PCIe controllers, we can't easily remap the separate
		 * metadata buffer from the block layer and thus require a
		 * separate metadata buffer for block layer metadata/PI support.
		 * We allow extended LBAs for the passthrough interface, though.
		 */
		if (id->flbas & NVME_NS_FLBAS_META_EXT)
			ns->features |= NVME_NS_EXT_LBAS;
		else
			ns->features |= NVME_NS_METADATA_SUPPORTED;
	}
}

1669 1670 1671
static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
		struct request_queue *q)
{
1672
	bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT;
1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686

	if (ctrl->max_hw_sectors) {
		u32 max_segments =
			(ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;

		max_segments = min_not_zero(max_segments, ctrl->max_segments);
		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
	}
	blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
	blk_queue_dma_alignment(q, 7);
	blk_queue_write_cache(q, vwc, vwc);
}

1687 1688 1689
static void nvme_update_disk_info(struct gendisk *disk,
		struct nvme_ns *ns, struct nvme_id_ns *id)
{
1690
	sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
1691
	unsigned short bs = 1 << ns->lba_shift;
D
Damien Le Moal 已提交
1692
	u32 atomic_bs, phys_bs, io_opt = 0;
1693

1694 1695 1696 1697
	/*
	 * The block layer can't support LBA sizes larger than the page size
	 * yet, so catch this early and don't allow block I/O.
	 */
1698
	if (ns->lba_shift > PAGE_SHIFT) {
1699
		capacity = 0;
1700 1701
		bs = (1 << 9);
	}
1702

1703 1704
	blk_integrity_unregister(disk);

D
Damien Le Moal 已提交
1705
	atomic_bs = phys_bs = bs;
1706 1707 1708 1709 1710 1711
	if (id->nabo == 0) {
		/*
		 * Bit 1 indicates whether NAWUPF is defined for this namespace
		 * and whether it should be used instead of AWUPF. If NAWUPF ==
		 * 0 then AWUPF must be used instead.
		 */
1712
		if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
1713 1714 1715 1716
			atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
		else
			atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
	}
K
Keith Busch 已提交
1717

1718
	if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
1719
		/* NPWG = Namespace Preferred Write Granularity */
K
Keith Busch 已提交
1720
		phys_bs = bs * (1 + le16_to_cpu(id->npwg));
1721
		/* NOWS = Namespace Optimal Write Size */
K
Keith Busch 已提交
1722
		io_opt = bs * (1 + le16_to_cpu(id->nows));
1723 1724
	}

1725
	blk_queue_logical_block_size(disk->queue, bs);
1726 1727 1728 1729 1730 1731 1732 1733
	/*
	 * Linux filesystems assume writing a single physical block is
	 * an atomic operation. Hence limit the physical block size to the
	 * value of the Atomic Write Unit Power Fail parameter.
	 */
	blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
	blk_queue_io_min(disk->queue, phys_bs);
	blk_queue_io_opt(disk->queue, io_opt);
1734

1735 1736 1737 1738 1739 1740 1741 1742 1743
	/*
	 * Register a metadata profile for PI, or the plain non-integrity NVMe
	 * metadata masquerading as Type 0 if supported, otherwise reject block
	 * I/O to namespaces with metadata except when the namespace supports
	 * PI, as it can strip/insert in that case.
	 */
	if (ns->ms) {
		if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
		    (ns->features & NVME_NS_METADATA_SUPPORTED))
1744 1745
			nvme_init_integrity(disk, ns->ms, ns->pi_type,
					    ns->ctrl->max_integrity_segments);
1746 1747 1748 1749
		else if (!nvme_ns_has_pi(ns))
			capacity = 0;
	}

1750
	set_capacity_and_notify(disk, capacity);
1751

1752
	nvme_config_discard(disk, ns);
1753 1754
	blk_queue_max_write_zeroes_sectors(disk->queue,
					   ns->ctrl->max_zeroes_sectors);
1755

1756 1757
	set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) ||
		test_bit(NVME_NS_FORCE_RO, &ns->flags));
1758 1759
}

1760 1761 1762
static inline bool nvme_first_scan(struct gendisk *disk)
{
	/* nvme_alloc_ns() scans the disk prior to adding it */
C
Christoph Hellwig 已提交
1763
	return !disk_live(disk);
1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796
}

static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u32 iob;

	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
	    is_power_of_2(ctrl->max_hw_sectors))
		iob = ctrl->max_hw_sectors;
	else
		iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));

	if (!iob)
		return;

	if (!is_power_of_2(iob)) {
		if (nvme_first_scan(ns->disk))
			pr_warn("%s: ignoring unaligned IO boundary:%u\n",
				ns->disk->disk_name, iob);
		return;
	}

	if (blk_queue_is_zoned(ns->disk->queue)) {
		if (nvme_first_scan(ns->disk))
			pr_warn("%s: ignoring zoned namespace IO boundary\n",
				ns->disk->disk_name);
		return;
	}

	blk_queue_chunk_sectors(ns->queue, iob);
}

1797
static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
1798
{
K
Keith Busch 已提交
1799 1800
	unsigned lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	int ret;
1801

1802
	blk_mq_freeze_queue(ns->disk->queue);
K
Keith Busch 已提交
1803
	ns->lba_shift = id->lbaf[lbaf].ds;
1804
	nvme_set_queue_limits(ns->ctrl, ns->queue);
1805

1806
	nvme_configure_metadata(ns, id);
1807 1808 1809
	nvme_set_chunk_sectors(ns, id);
	nvme_update_disk_info(ns->disk, ns, id);

1810
	if (ns->head->ids.csi == NVME_CSI_ZNS) {
1811
		ret = nvme_update_zone_info(ns, lbaf);
1812
		if (ret)
1813
			goto out_unfreeze;
1814 1815
	}

1816
	set_bit(NVME_NS_READY, &ns->flags);
1817
	blk_mq_unfreeze_queue(ns->disk->queue);
1818

1819 1820
	if (blk_queue_is_zoned(ns->queue)) {
		ret = nvme_revalidate_zones(ns);
1821
		if (ret && !nvme_first_scan(ns->disk))
1822
			return ret;
1823 1824
	}

1825
	if (nvme_ns_head_multipath(ns->head)) {
1826
		blk_mq_freeze_queue(ns->head->disk->queue);
1827
		nvme_update_disk_info(ns->head->disk, ns, id);
1828
		nvme_mpath_revalidate_paths(ns);
1829 1830
		blk_stack_limits(&ns->head->disk->queue->limits,
				 &ns->queue->limits, 0);
1831
		disk_update_readahead(ns->head->disk);
1832
		blk_mq_unfreeze_queue(ns->head->disk->queue);
1833
	}
1834
	return 0;
1835

1836
out_unfreeze:
1837 1838 1839 1840 1841 1842
	/*
	 * If probing fails due an unsupported feature, hide the block device,
	 * but still allow other access.
	 */
	if (ret == -ENODEV) {
		ns->disk->flags |= GENHD_FL_HIDDEN;
1843
		set_bit(NVME_NS_READY, &ns->flags);
1844 1845
		ret = 0;
	}
1846
	blk_mq_unfreeze_queue(ns->disk->queue);
K
Keith Busch 已提交
1847 1848 1849
	return ret;
}

1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867
static char nvme_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 1;
	case PR_EXCLUSIVE_ACCESS:
		return 2;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 3;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 4;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 5;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 6;
	default:
		return 0;
	}
1868
}
1869

1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892
static int nvme_send_ns_head_pr_command(struct block_device *bdev,
		struct nvme_command *c, u8 data[16])
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;
	int srcu_idx = srcu_read_lock(&head->srcu);
	struct nvme_ns *ns = nvme_find_path(head);
	int ret = -EWOULDBLOCK;

	if (ns) {
		c->common.nsid = cpu_to_le32(ns->head->ns_id);
		ret = nvme_submit_sync_cmd(ns->queue, c, data, 16);
	}
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}
	
static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
		u8 data[16])
{
	c->common.nsid = cpu_to_le32(ns->head->ns_id);
	return nvme_submit_sync_cmd(ns->queue, c, data, 16);
}

1893 1894 1895
static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
1896
	struct nvme_command c = { };
1897 1898 1899 1900 1901 1902
	u8 data[16] = { 0, };

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	c.common.opcode = op;
1903
	c.common.cdw10 = cpu_to_le32(cdw10);
1904

1905 1906 1907 1908
	if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
	    bdev->bd_disk->fops == &nvme_ns_head_ops)
		return nvme_send_ns_head_pr_command(bdev, &c, data);
	return nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c, data);
1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940
}

static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}

static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}

static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
1941
	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
1942

1943 1944 1945 1946 1947
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}

static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
1948
	u32 cdw10 = 1 | (key ? 1 << 3 : 0);
1949

1950 1951 1952 1953 1954
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
}

static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
1955
	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
1956

1957 1958 1959
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

1960
const struct pr_ops nvme_pr_ops = {
1961 1962 1963 1964 1965 1966 1967
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};

1968
#ifdef CONFIG_BLK_SED_OPAL
1969 1970
int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send)
1971
{
1972
	struct nvme_ctrl *ctrl = data;
1973
	struct nvme_command cmd = { };
1974 1975 1976 1977 1978 1979

	if (send)
		cmd.common.opcode = nvme_admin_security_send;
	else
		cmd.common.opcode = nvme_admin_security_recv;
	cmd.common.nsid = 0;
1980 1981
	cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
	cmd.common.cdw11 = cpu_to_le32(len);
1982

1983
	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 0,
1984
			NVME_QID_ANY, 1, 0);
1985 1986 1987 1988
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */

1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999
#ifdef CONFIG_BLK_DEV_ZONED
static int nvme_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data)
{
	return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb,
			data);
}
#else
#define nvme_report_zones	NULL
#endif /* CONFIG_BLK_DEV_ZONED */

J
Javier González 已提交
2000
static const struct block_device_operations nvme_bdev_ops = {
2001 2002 2003 2004 2005
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
K
Keith Busch 已提交
2006
	.report_zones	= nvme_report_zones,
2007 2008 2009
	.pr_ops		= &nvme_pr_ops,
};

2010 2011 2012 2013 2014 2015 2016 2017
static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
	unsigned long timeout =
		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
	int ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
K
Keith Busch 已提交
2018 2019
		if (csts == ~0)
			return -ENODEV;
2020 2021 2022
		if ((csts & NVME_CSTS_RDY) == bit)
			break;

2023
		usleep_range(1000, 2000);
2024 2025 2026
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
2027
			dev_err(ctrl->device,
2028 2029
				"Device not ready; aborting %s, CSTS=0x%x\n",
				enabled ? "initialisation" : "reset", csts);
2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042
			return -ENODEV;
		}
	}

	return ret;
}

/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
2043
int nvme_disable_ctrl(struct nvme_ctrl *ctrl)
2044 2045 2046 2047 2048 2049 2050 2051 2052
{
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config &= ~NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
2053

2054
	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
2055 2056
		msleep(NVME_QUIRK_DELAY_AMOUNT);

2057
	return nvme_wait_ready(ctrl, ctrl->cap, false);
2058
}
2059
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
2060

2061
int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
2062
{
2063
	unsigned dev_page_min;
2064 2065
	int ret;

2066 2067 2068 2069 2070 2071 2072
	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
	if (ret) {
		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
		return ret;
	}
	dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;

2073
	if (NVME_CTRL_PAGE_SHIFT < dev_page_min) {
2074
		dev_err(ctrl->device,
2075
			"Minimum device page size %u too large for host (%u)\n",
2076
			1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT);
2077 2078 2079
		return -ENODEV;
	}

2080 2081 2082 2083
	if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI)
		ctrl->ctrl_config = NVME_CC_CSS_CSI;
	else
		ctrl->ctrl_config = NVME_CC_CSS_NVM;
2084
	ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
2085
	ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
2086 2087 2088 2089 2090 2091
	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
	ctrl->ctrl_config |= NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
2092
	return nvme_wait_ready(ctrl, ctrl->cap, true);
2093
}
2094
EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
2095 2096 2097

int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
{
2098
	unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ);
2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116
	u32 csts;
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
2117
			dev_err(ctrl->device,
2118 2119 2120 2121 2122 2123 2124
				"Device shutdown incomplete; abort shutdown\n");
			return -ENODEV;
		}
	}

	return ret;
}
2125
EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
2126

2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143
static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
{
	__le64 ts;
	int ret;

	if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
		return 0;

	ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
	ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
			NULL);
	if (ret)
		dev_warn_once(ctrl->device,
			"could not set timestamp (%d)\n", ret);
	return ret;
}

2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163
static int nvme_configure_acre(struct nvme_ctrl *ctrl)
{
	struct nvme_feat_host_behavior *host;
	int ret;

	/* Don't bother enabling the feature if retry delay is not reported */
	if (!ctrl->crdt[0])
		return 0;

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return 0;

	host->acre = NVME_ENABLE_ACRE;
	ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
				host, sizeof(*host), NULL);
	kfree(host);
	return ret;
}

2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192
/*
 * The function checks whether the given total (exlat + enlat) latency of
 * a power state allows the latter to be used as an APST transition target.
 * It does so by comparing the latency to the primary and secondary latency
 * tolerances defined by module params. If there's a match, the corresponding
 * timeout value is returned and the matching tolerance index (1 or 2) is
 * reported.
 */
static bool nvme_apst_get_transition_time(u64 total_latency,
		u64 *transition_time, unsigned *last_index)
{
	if (total_latency <= apst_primary_latency_tol_us) {
		if (*last_index == 1)
			return false;
		*last_index = 1;
		*transition_time = apst_primary_timeout_ms;
		return true;
	}
	if (apst_secondary_timeout_ms &&
		total_latency <= apst_secondary_latency_tol_us) {
		if (*last_index <= 2)
			return false;
		*last_index = 2;
		*transition_time = apst_secondary_timeout_ms;
		return true;
	}
	return false;
}

2193 2194 2195
/*
 * APST (Autonomous Power State Transition) lets us program a table of power
 * state transitions that the controller will perform automatically.
2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211
 *
 * Depending on module params, one of the two supported techniques will be used:
 *
 * - If the parameters provide explicit timeouts and tolerances, they will be
 *   used to build a table with up to 2 non-operational states to transition to.
 *   The default parameter values were selected based on the values used by
 *   Microsoft's and Intel's NVMe drivers. Yet, since we don't implement dynamic
 *   regeneration of the APST table in the event of switching between external
 *   and battery power, the timeouts and tolerances reflect a compromise
 *   between values used by Microsoft for AC and battery scenarios.
 * - If not, we'll configure the table with a simple heuristic: we are willing
 *   to spend at most 2% of the time transitioning between power states.
 *   Therefore, when running in any given state, we will enter the next
 *   lower-power non-operational state after waiting 50 * (enlat + exlat)
 *   microseconds, as long as that state's exit latency is under the requested
 *   maximum latency.
2212 2213 2214 2215 2216 2217
 *
 * We will not autonomously enter any non-operational state for which the total
 * latency exceeds ps_max_latency_us.
 *
 * Users can set ps_max_latency_us to zero to turn off APST.
 */
2218
static int nvme_configure_apst(struct nvme_ctrl *ctrl)
2219 2220
{
	struct nvme_feat_auto_pst *table;
2221
	unsigned apste = 0;
2222
	u64 max_lat_us = 0;
2223
	__le64 target = 0;
2224
	int max_ps = -1;
2225
	int state;
2226
	int ret;
2227
	unsigned last_lt_index = UINT_MAX;
2228 2229 2230 2231 2232 2233

	/*
	 * If APST isn't supported or if we haven't been initialized yet,
	 * then don't do anything.
	 */
	if (!ctrl->apsta)
2234
		return 0;
2235 2236 2237

	if (ctrl->npss > 31) {
		dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
2238
		return 0;
2239 2240 2241 2242
	}

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
2243
		return 0;
2244

2245
	if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
2246
		/* Turn off APST. */
2247
		dev_dbg(ctrl->device, "APST disabled\n");
2248 2249
		goto done;
	}
2250

2251 2252 2253 2254 2255 2256 2257 2258
	/*
	 * Walk through all states from lowest- to highest-power.
	 * According to the spec, lower-numbered states use more power.  NPSS,
	 * despite the name, is the index of the lowest-power state, not the
	 * number of states.
	 */
	for (state = (int)ctrl->npss; state >= 0; state--) {
		u64 total_latency_us, exit_latency_us, transition_ms;
2259

2260 2261
		if (target)
			table->entries[state] = target;
2262 2263

		/*
2264 2265
		 * Don't allow transitions to the deepest state if it's quirked
		 * off.
2266
		 */
2267 2268 2269
		if (state == ctrl->npss &&
		    (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
			continue;
2270

2271 2272 2273 2274 2275 2276
		/*
		 * Is this state a useful non-operational state for higher-power
		 * states to autonomously transition to?
		 */
		if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE))
			continue;
2277

2278 2279 2280
		exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
		if (exit_latency_us > ctrl->ps_max_latency_us)
			continue;
2281

2282 2283
		total_latency_us = exit_latency_us +
			le32_to_cpu(ctrl->psd[state].entry_lat);
2284

2285
		/*
2286 2287
		 * This state is good. It can be used as the APST idle target
		 * for higher power states.
2288
		 */
2289 2290 2291 2292 2293 2294 2295 2296 2297 2298
		if (apst_primary_timeout_ms && apst_primary_latency_tol_us) {
			if (!nvme_apst_get_transition_time(total_latency_us,
					&transition_ms, &last_lt_index))
				continue;
		} else {
			transition_ms = total_latency_us + 19;
			do_div(transition_ms, 20);
			if (transition_ms > (1 << 24) - 1)
				transition_ms = (1 << 24) - 1;
		}
2299 2300 2301 2302 2303 2304

		target = cpu_to_le64((state << 3) | (transition_ms << 8));
		if (max_ps == -1)
			max_ps = state;
		if (total_latency_us > max_lat_us)
			max_lat_us = total_latency_us;
2305 2306
	}

2307 2308 2309 2310 2311 2312 2313 2314
	if (max_ps == -1)
		dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
	else
		dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
			max_ps, max_lat_us, (int)sizeof(*table), table);
	apste = 1;

done:
2315 2316 2317 2318 2319
	ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
				table, sizeof(*table), NULL);
	if (ret)
		dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
	kfree(table);
2320
	return ret;
2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339
}

static void nvme_set_latency_tolerance(struct device *dev, s32 val)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	u64 latency;

	switch (val) {
	case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
	case PM_QOS_LATENCY_ANY:
		latency = U64_MAX;
		break;

	default:
		latency = val;
	}

	if (ctrl->ps_max_latency_us != latency) {
		ctrl->ps_max_latency_us = latency;
2340 2341
		if (ctrl->state == NVME_CTRL_LIVE)
			nvme_configure_apst(ctrl);
2342 2343 2344
	}
}

2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357
struct nvme_core_quirk_entry {
	/*
	 * NVMe model and firmware strings are padded with spaces.  For
	 * simplicity, strings in the quirk table are padded with NULLs
	 * instead.
	 */
	u16 vid;
	const char *mn;
	const char *fr;
	unsigned long quirks;
};

static const struct nvme_core_quirk_entry core_quirks[] = {
2358
	{
2359 2360 2361 2362 2363 2364
		/*
		 * This Toshiba device seems to die using any APST states.  See:
		 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
		 */
		.vid = 0x1179,
		.mn = "THNSF5256GPUK TOSHIBA",
2365
		.quirks = NVME_QUIRK_NO_APST,
2366 2367 2368 2369 2370 2371 2372 2373 2374 2375
	},
	{
		/*
		 * This LiteON CL1-3D*-Q11 firmware version has a race
		 * condition associated with actions related to suspend to idle
		 * LiteON has resolved the problem in future firmware
		 */
		.vid = 0x14a4,
		.fr = "22301111",
		.quirks = NVME_QUIRK_SIMPLE_SUSPEND,
2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389
	},
	{
		/*
		 * This Kioxia CD6-V Series / HPE PE8030 device times out and
		 * aborts I/O during any load, but more easily reproducible
		 * with discards (fstrim).
		 *
		 * The device is left in a state where it is also not possible
		 * to use "nvme set-feature" to disable APST, but booting with
		 * nvme_core.default_ps_max_latency=0 works.
		 */
		.vid = 0x1e0f,
		.mn = "KCD6XVUL6T40",
		.quirks = NVME_QUIRK_NO_APST,
2390
	}
2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421
};

/* match is null-terminated but idstr is space-padded. */
static bool string_matches(const char *idstr, const char *match, size_t len)
{
	size_t matchlen;

	if (!match)
		return true;

	matchlen = strlen(match);
	WARN_ON_ONCE(matchlen > len);

	if (memcmp(idstr, match, matchlen))
		return false;

	for (; matchlen < len; matchlen++)
		if (idstr[matchlen] != ' ')
			return false;

	return true;
}

static bool quirk_matches(const struct nvme_id_ctrl *id,
			  const struct nvme_core_quirk_entry *q)
{
	return q->vid == le16_to_cpu(id->vid) &&
		string_matches(id->mn, q->mn, sizeof(id->mn)) &&
		string_matches(id->fr, q->fr, sizeof(id->fr));
}

C
Christoph Hellwig 已提交
2422 2423
static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
2424 2425 2426 2427
{
	size_t nqnlen;
	int off;

2428 2429 2430 2431 2432 2433
	if(!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
		nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
		if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
			strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
			return;
		}
2434

2435 2436 2437
		if (ctrl->vs >= NVME_VS(1, 2, 1))
			dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
	}
2438 2439

	/* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
C
Christoph Hellwig 已提交
2440
	off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
2441
			"nqn.2014.08.org.nvmexpress:%04x%04x",
2442
			le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
C
Christoph Hellwig 已提交
2443
	memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
2444
	off += sizeof(id->sn);
C
Christoph Hellwig 已提交
2445
	memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
2446
	off += sizeof(id->mn);
C
Christoph Hellwig 已提交
2447 2448 2449
	memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
}

2450
static void nvme_release_subsystem(struct device *dev)
C
Christoph Hellwig 已提交
2451
{
2452 2453 2454
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

2455
	if (subsys->instance >= 0)
2456
		ida_free(&nvme_instance_ida, subsys->instance);
C
Christoph Hellwig 已提交
2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468
	kfree(subsys);
}

static void nvme_destroy_subsystem(struct kref *ref)
{
	struct nvme_subsystem *subsys =
			container_of(ref, struct nvme_subsystem, ref);

	mutex_lock(&nvme_subsystems_lock);
	list_del(&subsys->entry);
	mutex_unlock(&nvme_subsystems_lock);

C
Christoph Hellwig 已提交
2469
	ida_destroy(&subsys->ns_ida);
C
Christoph Hellwig 已提交
2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484
	device_del(&subsys->dev);
	put_device(&subsys->dev);
}

static void nvme_put_subsystem(struct nvme_subsystem *subsys)
{
	kref_put(&subsys->ref, nvme_destroy_subsystem);
}

static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
{
	struct nvme_subsystem *subsys;

	lockdep_assert_held(&nvme_subsystems_lock);

2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495
	/*
	 * Fail matches for discovery subsystems. This results
	 * in each discovery controller bound to a unique subsystem.
	 * This avoids issues with validating controller values
	 * that can only be true when there is a single unique subsystem.
	 * There may be multiple and completely independent entities
	 * that provide discovery controllers.
	 */
	if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME))
		return NULL;

C
Christoph Hellwig 已提交
2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506
	list_for_each_entry(subsys, &nvme_subsystems, entry) {
		if (strcmp(subsys->subnqn, subsysnqn))
			continue;
		if (!kref_get_unless_zero(&subsys->ref))
			continue;
		return subsys;
	}

	return NULL;
}

2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517
#define SUBSYS_ATTR_RO(_name, _mode, _show)			\
	struct device_attribute subsys_attr_##_name = \
		__ATTR(_name, _mode, _show, NULL)

static ssize_t nvme_subsys_show_nqn(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

2518
	return sysfs_emit(buf, "%s\n", subsys->subnqn);
2519 2520 2521
}
static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);

2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539
static ssize_t nvme_subsys_show_type(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	switch (subsys->subtype) {
	case NVME_NQN_DISC:
		return sysfs_emit(buf, "discovery\n");
	case NVME_NQN_NVME:
		return sysfs_emit(buf, "nvm\n");
	default:
		return sysfs_emit(buf, "reserved\n");
	}
}
static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type);

2540 2541 2542 2543 2544 2545
#define nvme_subsys_show_str_function(field)				\
static ssize_t subsys_##field##_show(struct device *dev,		\
			    struct device_attribute *attr, char *buf)	\
{									\
	struct nvme_subsystem *subsys =					\
		container_of(dev, struct nvme_subsystem, dev);		\
2546 2547
	return sysfs_emit(buf, "%.*s\n",				\
			   (int)sizeof(subsys->field), subsys->field);	\
2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559
}									\
static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);

nvme_subsys_show_str_function(model);
nvme_subsys_show_str_function(serial);
nvme_subsys_show_str_function(firmware_rev);

static struct attribute *nvme_subsys_attrs[] = {
	&subsys_attr_model.attr,
	&subsys_attr_serial.attr,
	&subsys_attr_firmware_rev.attr,
	&subsys_attr_subsysnqn.attr,
2560
	&subsys_attr_subsystype.attr,
2561 2562 2563
#ifdef CONFIG_NVME_MULTIPATH
	&subsys_attr_iopolicy.attr,
#endif
2564 2565 2566
	NULL,
};

2567
static const struct attribute_group nvme_subsys_attrs_group = {
2568 2569 2570 2571 2572 2573 2574 2575
	.attrs = nvme_subsys_attrs,
};

static const struct attribute_group *nvme_subsys_attrs_groups[] = {
	&nvme_subsys_attrs_group,
	NULL,
};

2576 2577 2578 2579 2580
static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
{
	return ctrl->opts && ctrl->opts->discovery_nqn;
}

2581 2582
static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
		struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2583
{
2584
	struct nvme_ctrl *tmp;
2585

2586 2587
	lockdep_assert_held(&nvme_subsystems_lock);

2588
	list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
2589
		if (nvme_state_terminal(tmp))
2590 2591 2592 2593
			continue;

		if (tmp->cntlid == ctrl->cntlid) {
			dev_err(ctrl->device,
2594 2595 2596
				"Duplicate cntlid %u with %s, subsys %s, rejecting\n",
				ctrl->cntlid, dev_name(tmp->device),
				subsys->subnqn);
2597 2598
			return false;
		}
2599

2600
		if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
2601
		    nvme_discovery_ctrl(ctrl))
2602 2603 2604 2605 2606
			continue;

		dev_err(ctrl->device,
			"Subsystem does not support multiple controllers\n");
		return false;
2607 2608
	}

2609
	return true;
2610 2611
}

C
Christoph Hellwig 已提交
2612 2613 2614 2615 2616 2617 2618 2619
static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	struct nvme_subsystem *subsys, *found;
	int ret;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return -ENOMEM;
2620 2621

	subsys->instance = -1;
C
Christoph Hellwig 已提交
2622 2623 2624
	mutex_init(&subsys->lock);
	kref_init(&subsys->ref);
	INIT_LIST_HEAD(&subsys->ctrls);
C
Christoph Hellwig 已提交
2625
	INIT_LIST_HEAD(&subsys->nsheads);
C
Christoph Hellwig 已提交
2626 2627 2628 2629 2630 2631
	nvme_init_subnqn(subsys, ctrl, id);
	memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
	memcpy(subsys->model, id->mn, sizeof(subsys->model));
	memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
	subsys->vendor_id = le16_to_cpu(id->vid);
	subsys->cmic = id->cmic;
2632 2633 2634 2635 2636 2637 2638 2639

	/* Versions prior to 1.4 don't necessarily report a valid type */
	if (id->cntrltype == NVME_CTRL_DISC ||
	    !strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME))
		subsys->subtype = NVME_NQN_DISC;
	else
		subsys->subtype = NVME_NQN_NVME;

2640 2641 2642 2643 2644 2645 2646
	if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) {
		dev_err(ctrl->device,
			"Subsystem %s is not a discovery controller",
			subsys->subnqn);
		kfree(subsys);
		return -EINVAL;
	}
2647
	subsys->awupf = le16_to_cpu(id->awupf);
2648
	nvme_mpath_default_iopolicy(subsys);
C
Christoph Hellwig 已提交
2649 2650 2651

	subsys->dev.class = nvme_subsys_class;
	subsys->dev.release = nvme_release_subsystem;
2652
	subsys->dev.groups = nvme_subsys_attrs_groups;
2653
	dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
C
Christoph Hellwig 已提交
2654 2655 2656 2657 2658
	device_initialize(&subsys->dev);

	mutex_lock(&nvme_subsystems_lock);
	found = __nvme_find_get_subsystem(subsys->subnqn);
	if (found) {
2659
		put_device(&subsys->dev);
C
Christoph Hellwig 已提交
2660
		subsys = found;
2661

2662
		if (!nvme_validate_cntlid(subsys, ctrl, id)) {
C
Christoph Hellwig 已提交
2663
			ret = -EINVAL;
2664
			goto out_put_subsystem;
C
Christoph Hellwig 已提交
2665 2666 2667 2668 2669 2670
		}
	} else {
		ret = device_add(&subsys->dev);
		if (ret) {
			dev_err(ctrl->device,
				"failed to register subsystem device.\n");
2671
			put_device(&subsys->dev);
C
Christoph Hellwig 已提交
2672 2673
			goto out_unlock;
		}
C
Christoph Hellwig 已提交
2674
		ida_init(&subsys->ns_ida);
C
Christoph Hellwig 已提交
2675 2676 2677
		list_add_tail(&subsys->entry, &nvme_subsystems);
	}

2678 2679 2680
	ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
				dev_name(ctrl->device));
	if (ret) {
C
Christoph Hellwig 已提交
2681 2682
		dev_err(ctrl->device,
			"failed to create sysfs link from subsystem.\n");
2683
		goto out_put_subsystem;
C
Christoph Hellwig 已提交
2684 2685
	}

2686 2687
	if (!found)
		subsys->instance = ctrl->instance;
2688
	ctrl->subsys = subsys;
C
Christoph Hellwig 已提交
2689
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
2690
	mutex_unlock(&nvme_subsystems_lock);
C
Christoph Hellwig 已提交
2691 2692
	return 0;

2693 2694
out_put_subsystem:
	nvme_put_subsystem(subsys);
C
Christoph Hellwig 已提交
2695 2696 2697
out_unlock:
	mutex_unlock(&nvme_subsystems_lock);
	return ret;
2698 2699
}

2700
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
2701
		void *log, size_t size, u64 offset)
K
Keith Busch 已提交
2702 2703
{
	struct nvme_command c = { };
K
Keith Busch 已提交
2704
	u32 dwlen = nvme_bytes_to_numd(size);
2705 2706

	c.get_log_page.opcode = nvme_admin_get_log_page;
2707
	c.get_log_page.nsid = cpu_to_le32(nsid);
2708
	c.get_log_page.lid = log_page;
2709
	c.get_log_page.lsp = lsp;
2710 2711
	c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
	c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
2712 2713
	c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
	c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
2714
	c.get_log_page.csi = csi;
K
Keith Busch 已提交
2715 2716 2717 2718

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
}

2719 2720
static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
				struct nvme_effects_log **log)
2721
{
2722
	struct nvme_effects_log	*cel = xa_load(&ctrl->cels, csi);
2723 2724
	int ret;

2725 2726
	if (cel)
		goto out;
2727

2728 2729 2730
	cel = kzalloc(sizeof(*cel), GFP_KERNEL);
	if (!cel)
		return -ENOMEM;
2731

2732
	ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi,
2733
			cel, sizeof(*cel), 0);
2734
	if (ret) {
2735 2736
		kfree(cel);
		return ret;
2737
	}
2738

2739
	xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
2740
out:
2741
	*log = cel;
2742
	return 0;
2743 2744
}

2745
static inline u32 nvme_mps_to_sectors(struct nvme_ctrl *ctrl, u32 units)
2746
{
2747
	u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val;
2748

2749 2750 2751
	if (check_shl_overflow(1U, units + page_shift - 9, &val))
		return UINT_MAX;
	return val;
2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765
}

static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
{
	struct nvme_command c = { };
	struct nvme_id_ctrl_nvm *id;
	int ret;

	if (ctrl->oncs & NVME_CTRL_ONCS_DSM) {
		ctrl->max_discard_sectors = UINT_MAX;
		ctrl->max_discard_segments = NVME_DSM_MAX_RANGES;
	} else {
		ctrl->max_discard_sectors = 0;
		ctrl->max_discard_segments = 0;
2766
	}
2767

2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806
	/*
	 * Even though NVMe spec explicitly states that MDTS is not applicable
	 * to the write-zeroes, we are cautious and limit the size to the
	 * controllers max_hw_sectors value, which is based on the MDTS field
	 * and possibly other limiting factors.
	 */
	if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) &&
	    !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
		ctrl->max_zeroes_sectors = ctrl->max_hw_sectors;
	else
		ctrl->max_zeroes_sectors = 0;

	if (nvme_ctrl_limited_cns(ctrl))
		return 0;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return 0;

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CS_CTRL;
	c.identify.csi = NVME_CSI_NVM;

	ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
	if (ret)
		goto free_data;

	if (id->dmrl)
		ctrl->max_discard_segments = id->dmrl;
	if (id->dmrsl)
		ctrl->max_discard_sectors = le32_to_cpu(id->dmrsl);
	if (id->wzsl)
		ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);

free_data:
	kfree(id);
	return ret;
}

2807
static int nvme_init_identify(struct nvme_ctrl *ctrl)
2808 2809
{
	struct nvme_id_ctrl *id;
2810
	u32 max_hw_sectors;
2811
	bool prev_apst_enabled;
2812
	int ret;
2813

2814 2815
	ret = nvme_identify_ctrl(ctrl, &id);
	if (ret) {
2816
		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
2817 2818 2819
		return -EIO;
	}

2820
	if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
2821
		ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
2822
		if (ret < 0)
2823
			goto out_free;
2824
	}
2825

2826 2827 2828
	if (!(ctrl->ops->flags & NVME_F_FABRICS))
		ctrl->cntlid = le16_to_cpu(id->cntlid);

2829
	if (!ctrl->identified) {
2830
		unsigned int i;
C
Christoph Hellwig 已提交
2831 2832 2833 2834 2835

		ret = nvme_init_subsystem(ctrl, id);
		if (ret)
			goto out_free;

2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849
		/*
		 * Check for quirks.  Quirk can depend on firmware version,
		 * so, in principle, the set of quirks present can change
		 * across a reset.  As a possible future enhancement, we
		 * could re-scan for quirks every time we reinitialize
		 * the device, but we'd have to make sure that the driver
		 * behaves intelligently if the quirks change.
		 */
		for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
			if (quirk_matches(id, &core_quirks[i]))
				ctrl->quirks |= core_quirks[i].quirks;
		}
	}

2850
	if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
2851
		dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
2852 2853 2854
		ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
	}

2855 2856 2857 2858
	ctrl->crdt[0] = le16_to_cpu(id->crdt1);
	ctrl->crdt[1] = le16_to_cpu(id->crdt2);
	ctrl->crdt[2] = le16_to_cpu(id->crdt3);

2859
	ctrl->oacs = le16_to_cpu(id->oacs);
2860
	ctrl->oncs = le16_to_cpu(id->oncs);
2861
	ctrl->mtfa = le16_to_cpu(id->mtfa);
2862
	ctrl->oaes = le32_to_cpu(id->oaes);
2863 2864 2865
	ctrl->wctemp = le16_to_cpu(id->wctemp);
	ctrl->cctemp = le16_to_cpu(id->cctemp);

2866
	atomic_set(&ctrl->abort_limit, id->acl + 1);
2867 2868
	ctrl->vwc = id->vwc;
	if (id->mdts)
2869
		max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts);
2870
	else
2871 2872 2873
		max_hw_sectors = UINT_MAX;
	ctrl->max_hw_sectors =
		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
2874

2875
	nvme_set_queue_limits(ctrl, ctrl->admin_q);
2876
	ctrl->sgls = le32_to_cpu(id->sgls);
S
Sagi Grimberg 已提交
2877
	ctrl->kas = le16_to_cpu(id->kas);
C
Christoph Hellwig 已提交
2878
	ctrl->max_namespaces = le32_to_cpu(id->mnan);
S
Sagi Grimberg 已提交
2879
	ctrl->ctratt = le32_to_cpu(id->ctratt);
2880

2881 2882 2883
	ctrl->cntrltype = id->cntrltype;
	ctrl->dctype = id->dctype;

2884 2885
	if (id->rtd3e) {
		/* us -> s */
2886
		u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC;
2887 2888 2889 2890 2891

		ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
						 shutdown_timeout, 60);

		if (ctrl->shutdown_timeout != shutdown_timeout)
2892
			dev_info(ctrl->device,
2893 2894 2895 2896 2897
				 "Shutdown timeout set to %u seconds\n",
				 ctrl->shutdown_timeout);
	} else
		ctrl->shutdown_timeout = shutdown_timeout;

2898
	ctrl->npss = id->npss;
2899 2900
	ctrl->apsta = id->apsta;
	prev_apst_enabled = ctrl->apst_enabled;
2901 2902
	if (ctrl->quirks & NVME_QUIRK_NO_APST) {
		if (force_apst && id->apsta) {
2903
			dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
2904
			ctrl->apst_enabled = true;
2905
		} else {
2906
			ctrl->apst_enabled = false;
2907 2908
		}
	} else {
2909
		ctrl->apst_enabled = id->apsta;
2910
	}
2911 2912
	memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));

2913
	if (ctrl->ops->flags & NVME_F_FABRICS) {
2914 2915 2916 2917 2918 2919 2920 2921 2922
		ctrl->icdoff = le16_to_cpu(id->icdoff);
		ctrl->ioccsz = le32_to_cpu(id->ioccsz);
		ctrl->iorcsz = le32_to_cpu(id->iorcsz);
		ctrl->maxcmd = le16_to_cpu(id->maxcmd);

		/*
		 * In fabrics we need to verify the cntlid matches the
		 * admin connect
		 */
2923
		if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
2924 2925 2926 2927
			dev_err(ctrl->device,
				"Mismatching cntlid: Connect %u vs Identify "
				"%u, rejecting\n",
				ctrl->cntlid, le16_to_cpu(id->cntlid));
2928
			ret = -EINVAL;
2929 2930
			goto out_free;
		}
S
Sagi Grimberg 已提交
2931

2932
		if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
2933
			dev_err(ctrl->device,
S
Sagi Grimberg 已提交
2934 2935
				"keep-alive support is mandatory for fabrics\n");
			ret = -EINVAL;
2936
			goto out_free;
S
Sagi Grimberg 已提交
2937
		}
2938
	} else {
2939 2940
		ctrl->hmpre = le32_to_cpu(id->hmpre);
		ctrl->hmmin = le32_to_cpu(id->hmmin);
2941 2942
		ctrl->hmminds = le32_to_cpu(id->hmminds);
		ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
2943
	}
2944

2945
	ret = nvme_mpath_init_identify(ctrl, id);
C
Christoph Hellwig 已提交
2946
	if (ret < 0)
2947
		goto out_free;
C
Christoph Hellwig 已提交
2948

2949
	if (ctrl->apst_enabled && !prev_apst_enabled)
2950
		dev_pm_qos_expose_latency_tolerance(ctrl->device);
2951
	else if (!ctrl->apst_enabled && prev_apst_enabled)
2952 2953
		dev_pm_qos_hide_latency_tolerance(ctrl->device);

2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982
out_free:
	kfree(id);
	return ret;
}

/*
 * Initialize the cached copies of the Identify data and various controller
 * register in our nvme_ctrl structure.  This should be called as soon as
 * the admin queue is fully up and running.
 */
int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
	if (ret) {
		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
		return ret;
	}

	ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);

	if (ctrl->vs >= NVME_VS(1, 1, 0))
		ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);

	ret = nvme_init_identify(ctrl);
	if (ret)
		return ret;

2983 2984 2985 2986
	ret = nvme_init_non_mdts_limits(ctrl);
	if (ret < 0)
		return ret;

2987 2988 2989
	ret = nvme_configure_apst(ctrl);
	if (ret < 0)
		return ret;
2990

2991 2992 2993
	ret = nvme_configure_timestamp(ctrl);
	if (ret < 0)
		return ret;
2994

2995 2996 2997 2998
	ret = nvme_configure_acre(ctrl);
	if (ret < 0)
		return ret;

2999
	if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
K
Keith Busch 已提交
3000 3001 3002 3003
		ret = nvme_hwmon_init(ctrl);
		if (ret < 0)
			return ret;
	}
3004

3005
	ctrl->identified = true;
3006

3007
	return 0;
3008
}
3009
EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish);
3010

3011
static int nvme_dev_open(struct inode *inode, struct file *file)
3012
{
3013 3014
	struct nvme_ctrl *ctrl =
		container_of(inode->i_cdev, struct nvme_ctrl, cdev);
3015

3016 3017 3018 3019
	switch (ctrl->state) {
	case NVME_CTRL_LIVE:
		break;
	default:
3020
		return -EWOULDBLOCK;
3021 3022
	}

3023
	nvme_get_ctrl(ctrl);
3024 3025
	if (!try_module_get(ctrl->ops->module)) {
		nvme_put_ctrl(ctrl);
3026
		return -EINVAL;
3027
	}
3028

3029
	file->private_data = ctrl;
3030 3031 3032
	return 0;
}

3033 3034 3035 3036 3037 3038 3039 3040 3041 3042
static int nvme_dev_release(struct inode *inode, struct file *file)
{
	struct nvme_ctrl *ctrl =
		container_of(inode->i_cdev, struct nvme_ctrl, cdev);

	module_put(ctrl->ops->module);
	nvme_put_ctrl(ctrl);
	return 0;
}

3043 3044 3045
static const struct file_operations nvme_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_dev_open,
3046
	.release	= nvme_dev_release,
3047
	.unlocked_ioctl	= nvme_dev_ioctl,
3048
	.compat_ioctl	= compat_ptr_ioctl,
3049 3050 3051 3052 3053 3054 3055 3056 3057
};

static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

3058
	ret = nvme_reset_ctrl_sync(ctrl);
3059 3060 3061
	if (ret < 0)
		return ret;
	return count;
3062
}
3063
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
3064

K
Keith Busch 已提交
3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075
static ssize_t nvme_sysfs_rescan(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	nvme_queue_scan(ctrl);
	return count;
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);

3076 3077 3078 3079
static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
{
	struct gendisk *disk = dev_to_disk(dev);

J
Javier González 已提交
3080
	if (disk->fops == &nvme_bdev_ops)
3081 3082 3083 3084 3085
		return nvme_get_ns_from_dev(dev)->head;
	else
		return disk->private_data;
}

3086
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
3087
		char *buf)
3088
{
3089 3090 3091
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	struct nvme_ns_ids *ids = &head->ids;
	struct nvme_subsystem *subsys = head->subsys;
C
Christoph Hellwig 已提交
3092 3093
	int serial_len = sizeof(subsys->serial);
	int model_len = sizeof(subsys->model);
3094

3095
	if (!uuid_is_null(&ids->uuid))
3096
		return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid);
3097

3098
	if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3099
		return sysfs_emit(buf, "eui.%16phN\n", ids->nguid);
3100

3101
	if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
3102
		return sysfs_emit(buf, "eui.%8phN\n", ids->eui64);
3103

C
Christoph Hellwig 已提交
3104 3105
	while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
				  subsys->serial[serial_len - 1] == '\0'))
3106
		serial_len--;
C
Christoph Hellwig 已提交
3107 3108
	while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
				 subsys->model[model_len - 1] == '\0'))
3109 3110
		model_len--;

3111
	return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
C
Christoph Hellwig 已提交
3112
		serial_len, subsys->serial, model_len, subsys->model,
3113
		head->ns_id);
3114
}
J
Joe Perches 已提交
3115
static DEVICE_ATTR_RO(wwid);
3116

3117
static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
3118
		char *buf)
3119
{
3120
	return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
3121
}
J
Joe Perches 已提交
3122
static DEVICE_ATTR_RO(nguid);
3123

3124
static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
3125
		char *buf)
3126
{
3127
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
3128 3129 3130 3131

	/* For backward compatibility expose the NGUID to userspace if
	 * we have no UUID set
	 */
3132
	if (uuid_is_null(&ids->uuid)) {
3133 3134
		printk_ratelimited(KERN_WARNING
				   "No UUID available providing old NGUID\n");
3135
		return sysfs_emit(buf, "%pU\n", ids->nguid);
3136
	}
3137
	return sysfs_emit(buf, "%pU\n", &ids->uuid);
3138
}
J
Joe Perches 已提交
3139
static DEVICE_ATTR_RO(uuid);
3140 3141

static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
3142
		char *buf)
3143
{
3144
	return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
3145
}
J
Joe Perches 已提交
3146
static DEVICE_ATTR_RO(eui);
3147 3148

static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
3149
		char *buf)
3150
{
3151
	return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
3152
}
J
Joe Perches 已提交
3153
static DEVICE_ATTR_RO(nsid);
3154

3155
static struct attribute *nvme_ns_id_attrs[] = {
3156
	&dev_attr_wwid.attr,
3157
	&dev_attr_uuid.attr,
3158
	&dev_attr_nguid.attr,
3159 3160
	&dev_attr_eui.attr,
	&dev_attr_nsid.attr,
C
Christoph Hellwig 已提交
3161 3162 3163 3164
#ifdef CONFIG_NVME_MULTIPATH
	&dev_attr_ana_grpid.attr,
	&dev_attr_ana_state.attr,
#endif
3165 3166 3167
	NULL,
};

3168
static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
3169 3170 3171
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
3172
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
3173 3174

	if (a == &dev_attr_uuid.attr) {
3175
		if (uuid_is_null(&ids->uuid) &&
3176
		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3177 3178 3179
			return 0;
	}
	if (a == &dev_attr_nguid.attr) {
3180
		if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3181 3182 3183
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
3184
		if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
3185 3186
			return 0;
	}
C
Christoph Hellwig 已提交
3187 3188
#ifdef CONFIG_NVME_MULTIPATH
	if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
J
Javier González 已提交
3189
		if (dev_to_disk(dev)->fops != &nvme_bdev_ops) /* per-path attr */
C
Christoph Hellwig 已提交
3190 3191 3192 3193 3194
			return 0;
		if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
			return 0;
	}
#endif
3195 3196 3197
	return a->mode;
}

3198
static const struct attribute_group nvme_ns_id_attr_group = {
3199 3200
	.attrs		= nvme_ns_id_attrs,
	.is_visible	= nvme_ns_id_attrs_are_visible,
3201 3202
};

3203 3204 3205 3206 3207
const struct attribute_group *nvme_ns_id_attr_groups[] = {
	&nvme_ns_id_attr_group,
	NULL,
};

M
Ming Lin 已提交
3208
#define nvme_show_str_function(field)						\
3209 3210 3211 3212
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
        struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
3213
        return sysfs_emit(buf, "%.*s\n",					\
C
Christoph Hellwig 已提交
3214
		(int)sizeof(ctrl->subsys->field), ctrl->subsys->field);		\
3215 3216 3217
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

C
Christoph Hellwig 已提交
3218 3219 3220 3221
nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);

M
Ming Lin 已提交
3222 3223 3224 3225 3226
#define nvme_show_int_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
        struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
3227
        return sysfs_emit(buf, "%d\n", ctrl->field);				\
M
Ming Lin 已提交
3228 3229 3230 3231
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_int_function(cntlid);
3232
nvme_show_int_function(numa_node);
3233 3234
nvme_show_int_function(queue_count);
nvme_show_int_function(sqsize);
3235
nvme_show_int_function(kato);
3236

M
Ming Lin 已提交
3237 3238 3239 3240 3241 3242 3243
static ssize_t nvme_sysfs_delete(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (device_remove_file_self(dev, attr))
3244
		nvme_delete_ctrl_sync(ctrl);
M
Ming Lin 已提交
3245 3246 3247 3248 3249 3250 3251 3252 3253 3254
	return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);

static ssize_t nvme_sysfs_show_transport(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

3255
	return sysfs_emit(buf, "%s\n", ctrl->ops->name);
M
Ming Lin 已提交
3256 3257 3258
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);

3259 3260 3261 3262 3263 3264 3265 3266 3267
static ssize_t nvme_sysfs_show_state(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	static const char *const state_name[] = {
		[NVME_CTRL_NEW]		= "new",
		[NVME_CTRL_LIVE]	= "live",
		[NVME_CTRL_RESETTING]	= "resetting",
3268
		[NVME_CTRL_CONNECTING]	= "connecting",
3269
		[NVME_CTRL_DELETING]	= "deleting",
3270
		[NVME_CTRL_DELETING_NOIO]= "deleting (no IO)",
3271 3272 3273 3274 3275
		[NVME_CTRL_DEAD]	= "dead",
	};

	if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
	    state_name[ctrl->state])
3276
		return sysfs_emit(buf, "%s\n", state_name[ctrl->state]);
3277

3278
	return sysfs_emit(buf, "unknown state\n");
3279 3280 3281 3282
}

static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);

M
Ming Lin 已提交
3283 3284 3285 3286 3287 3288
static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

3289
	return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
M
Ming Lin 已提交
3290 3291 3292
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

3293 3294 3295 3296 3297 3298
static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

3299
	return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
3300 3301 3302
}
static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);

3303 3304 3305 3306 3307 3308
static ssize_t nvme_sysfs_show_hostid(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

3309
	return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
3310 3311 3312
}
static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);

M
Ming Lin 已提交
3313 3314 3315 3316 3317 3318 3319 3320 3321 3322
static ssize_t nvme_sysfs_show_address(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);

3323 3324 3325 3326 3327 3328 3329
static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (ctrl->opts->max_reconnects == -1)
3330 3331 3332
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n",
			  opts->max_reconnects * opts->reconnect_delay);
3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345
}

static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ctrl_loss_tmo, err;

	err = kstrtoint(buf, 10, &ctrl_loss_tmo);
	if (err)
		return -EINVAL;

3346
	if (ctrl_loss_tmo < 0)
3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361
		opts->max_reconnects = -1;
	else
		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
						opts->reconnect_delay);
	return count;
}
static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR,
	nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store);

static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->opts->reconnect_delay == -1)
3362 3363
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay);
3364 3365 3366 3367 3368 3369 3370 3371 3372 3373
}

static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	unsigned int v;
	int err;

	err = kstrtou32(buf, 10, &v);
3374 3375
	if (err)
		return err;
3376 3377 3378 3379 3380 3381 3382

	ctrl->opts->reconnect_delay = v;
	return count;
}
static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR,
	nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store);

3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412
static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->opts->fast_io_fail_tmo == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo);
}

static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int fast_io_fail_tmo, err;

	err = kstrtoint(buf, 10, &fast_io_fail_tmo);
	if (err)
		return -EINVAL;

	if (fast_io_fail_tmo < 0)
		opts->fast_io_fail_tmo = -1;
	else
		opts->fast_io_fail_tmo = fast_io_fail_tmo;
	return count;
}
static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
	nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store);

3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446
static ssize_t cntrltype_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	static const char * const type[] = {
		[NVME_CTRL_IO] = "io\n",
		[NVME_CTRL_DISC] = "discovery\n",
		[NVME_CTRL_ADMIN] = "admin\n",
	};
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype])
		return sysfs_emit(buf, "reserved\n");

	return sysfs_emit(buf, type[ctrl->cntrltype]);
}
static DEVICE_ATTR_RO(cntrltype);

static ssize_t dctype_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	static const char * const type[] = {
		[NVME_DCTYPE_NOT_REPORTED] = "none\n",
		[NVME_DCTYPE_DDC] = "ddc\n",
		[NVME_DCTYPE_CDC] = "cdc\n",
	};
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype])
		return sysfs_emit(buf, "reserved\n");

	return sysfs_emit(buf, type[ctrl->dctype]);
}
static DEVICE_ATTR_RO(dctype);

3447 3448
static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
K
Keith Busch 已提交
3449
	&dev_attr_rescan_controller.attr,
3450 3451 3452
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
M
Ming Lin 已提交
3453
	&dev_attr_cntlid.attr,
M
Ming Lin 已提交
3454 3455 3456 3457
	&dev_attr_delete_controller.attr,
	&dev_attr_transport.attr,
	&dev_attr_subsysnqn.attr,
	&dev_attr_address.attr,
3458
	&dev_attr_state.attr,
3459
	&dev_attr_numa_node.attr,
3460 3461
	&dev_attr_queue_count.attr,
	&dev_attr_sqsize.attr,
3462
	&dev_attr_hostnqn.attr,
3463
	&dev_attr_hostid.attr,
3464 3465
	&dev_attr_ctrl_loss_tmo.attr,
	&dev_attr_reconnect_delay.attr,
3466
	&dev_attr_fast_io_fail_tmo.attr,
3467
	&dev_attr_kato.attr,
3468 3469
	&dev_attr_cntrltype.attr,
	&dev_attr_dctype.attr,
3470 3471 3472
	NULL
};

M
Ming Lin 已提交
3473 3474 3475 3476 3477 3478
static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

3479 3480 3481 3482
	if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
		return 0;
	if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
		return 0;
3483 3484
	if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
		return 0;
3485 3486
	if (a == &dev_attr_hostid.attr && !ctrl->opts)
		return 0;
3487 3488 3489 3490
	if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
		return 0;
3491 3492
	if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
		return 0;
M
Ming Lin 已提交
3493 3494 3495 3496

	return a->mode;
}

3497
static const struct attribute_group nvme_dev_attrs_group = {
M
Ming Lin 已提交
3498 3499
	.attrs		= nvme_dev_attrs,
	.is_visible	= nvme_dev_attrs_are_visible,
3500 3501 3502 3503 3504 3505 3506
};

static const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
	NULL,
};

3507
static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
C
Christoph Hellwig 已提交
3508 3509 3510 3511 3512 3513 3514
		unsigned nsid)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);

	list_for_each_entry(h, &subsys->nsheads, entry) {
3515 3516 3517
		if (h->ns_id != nsid)
			continue;
		if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
C
Christoph Hellwig 已提交
3518 3519 3520 3521 3522 3523
			return h;
	}

	return NULL;
}

3524 3525
static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys,
		struct nvme_ns_ids *ids)
C
Christoph Hellwig 已提交
3526
{
3527 3528 3529
	bool has_uuid = !uuid_is_null(&ids->uuid);
	bool has_nguid = memchr_inv(ids->nguid, 0, sizeof(ids->nguid));
	bool has_eui64 = memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
C
Christoph Hellwig 已提交
3530 3531 3532 3533 3534
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);

	list_for_each_entry(h, &subsys->nsheads, entry) {
3535 3536 3537 3538 3539 3540 3541
		if (has_uuid && uuid_equal(&ids->uuid, &h->ids.uuid))
			return -EINVAL;
		if (has_nguid &&
		    memcmp(&ids->nguid, &h->ids.nguid, sizeof(ids->nguid)) == 0)
			return -EINVAL;
		if (has_eui64 &&
		    memcmp(&ids->eui64, &h->ids.eui64, sizeof(ids->eui64)) == 0)
C
Christoph Hellwig 已提交
3542 3543 3544 3545 3546 3547
			return -EINVAL;
	}

	return 0;
}

3548 3549
static void nvme_cdev_rel(struct device *dev)
{
3550
	ida_free(&nvme_ns_chr_minor_ida, MINOR(dev->devt));
3551 3552
}

3553 3554 3555
void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device)
{
	cdev_device_del(cdev, cdev_device);
3556
	put_device(cdev_device);
3557 3558 3559 3560 3561 3562 3563
}

int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
		const struct file_operations *fops, struct module *owner)
{
	int minor, ret;

3564
	minor = ida_alloc(&nvme_ns_chr_minor_ida, GFP_KERNEL);
3565 3566 3567 3568
	if (minor < 0)
		return minor;
	cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
	cdev_device->class = nvme_ns_chr_class;
3569
	cdev_device->release = nvme_cdev_rel;
3570 3571 3572 3573
	device_initialize(cdev_device);
	cdev_init(cdev, fops);
	cdev->owner = owner;
	ret = cdev_device_add(cdev, cdev_device);
3574
	if (ret)
3575
		put_device(cdev_device);
3576

3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607
	return ret;
}

static int nvme_ns_chr_open(struct inode *inode, struct file *file)
{
	return nvme_ns_open(container_of(inode->i_cdev, struct nvme_ns, cdev));
}

static int nvme_ns_chr_release(struct inode *inode, struct file *file)
{
	nvme_ns_release(container_of(inode->i_cdev, struct nvme_ns, cdev));
	return 0;
}

static const struct file_operations nvme_ns_chr_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_ns_chr_open,
	.release	= nvme_ns_chr_release,
	.unlocked_ioctl	= nvme_ns_chr_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};

static int nvme_add_ns_cdev(struct nvme_ns *ns)
{
	int ret;

	ns->cdev_device.parent = ns->ctrl->device;
	ret = dev_set_name(&ns->cdev_device, "ng%dn%d",
			   ns->ctrl->instance, ns->head->instance);
	if (ret)
		return ret;
3608 3609 3610

	return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
			     ns->ctrl->ops->module);
3611 3612
}

C
Christoph Hellwig 已提交
3613
static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
K
Keith Busch 已提交
3614
		unsigned nsid, struct nvme_ns_ids *ids)
C
Christoph Hellwig 已提交
3615 3616
{
	struct nvme_ns_head *head;
3617
	size_t size = sizeof(*head);
C
Christoph Hellwig 已提交
3618 3619
	int ret = -ENOMEM;

3620 3621 3622 3623 3624
#ifdef CONFIG_NVME_MULTIPATH
	size += num_possible_nodes() * sizeof(struct nvme_ns *);
#endif

	head = kzalloc(size, GFP_KERNEL);
C
Christoph Hellwig 已提交
3625 3626
	if (!head)
		goto out;
3627
	ret = ida_alloc_min(&ctrl->subsys->ns_ida, 1, GFP_KERNEL);
C
Christoph Hellwig 已提交
3628 3629 3630 3631
	if (ret < 0)
		goto out_free_head;
	head->instance = ret;
	INIT_LIST_HEAD(&head->list);
3632 3633 3634
	ret = init_srcu_struct(&head->srcu);
	if (ret)
		goto out_ida_remove;
C
Christoph Hellwig 已提交
3635 3636
	head->subsys = ctrl->subsys;
	head->ns_id = nsid;
3637
	head->ids = *ids;
C
Christoph Hellwig 已提交
3638 3639
	kref_init(&head->ref);

3640 3641 3642 3643 3644 3645 3646
	if (head->ids.csi) {
		ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects);
		if (ret)
			goto out_cleanup_srcu;
	} else
		head->effects = ctrl->effects;

3647 3648 3649 3650
	ret = nvme_mpath_alloc_disk(ctrl, head);
	if (ret)
		goto out_cleanup_srcu;

C
Christoph Hellwig 已提交
3651
	list_add_tail(&head->entry, &ctrl->subsys->nsheads);
3652 3653 3654

	kref_get(&ctrl->subsys->ref);

C
Christoph Hellwig 已提交
3655 3656 3657
	return head;
out_cleanup_srcu:
	cleanup_srcu_struct(&head->srcu);
3658
out_ida_remove:
3659
	ida_free(&ctrl->subsys->ns_ida, head->instance);
C
Christoph Hellwig 已提交
3660 3661 3662
out_free_head:
	kfree(head);
out:
3663 3664
	if (ret > 0)
		ret = blk_status_to_errno(nvme_error_status(ret));
C
Christoph Hellwig 已提交
3665 3666 3667
	return ERR_PTR(ret);
}

3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693
static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this,
		struct nvme_ns_ids *ids)
{
	struct nvme_subsystem *s;
	int ret = 0;

	/*
	 * Note that this check is racy as we try to avoid holding the global
	 * lock over the whole ns_head creation.  But it is only intended as
	 * a sanity check anyway.
	 */
	mutex_lock(&nvme_subsystems_lock);
	list_for_each_entry(s, &nvme_subsystems, entry) {
		if (s == this)
			continue;
		mutex_lock(&s->lock);
		ret = nvme_subsys_check_duplicate_ids(s, ids);
		mutex_unlock(&s->lock);
		if (ret)
			break;
	}
	mutex_unlock(&nvme_subsystems_lock);

	return ret;
}

C
Christoph Hellwig 已提交
3694
static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
3695
		struct nvme_ns_ids *ids, bool is_shared)
C
Christoph Hellwig 已提交
3696 3697 3698
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_ns_head *head = NULL;
3699 3700 3701 3702 3703 3704 3705 3706
	int ret;

	ret = nvme_global_check_duplicate_ids(ctrl->subsys, ids);
	if (ret) {
		dev_err(ctrl->device,
			"globally duplicate IDs for nsid %d\n", nsid);
		return ret;
	}
C
Christoph Hellwig 已提交
3707 3708

	mutex_lock(&ctrl->subsys->lock);
3709
	head = nvme_find_ns_head(ctrl->subsys, nsid);
C
Christoph Hellwig 已提交
3710
	if (!head) {
3711 3712 3713
		ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, ids);
		if (ret) {
			dev_err(ctrl->device,
3714 3715
				"duplicate IDs in subsystem for nsid %d\n",
				nsid);
3716 3717
			goto out_unlock;
		}
3718
		head = nvme_alloc_ns_head(ctrl, nsid, ids);
C
Christoph Hellwig 已提交
3719 3720 3721 3722
		if (IS_ERR(head)) {
			ret = PTR_ERR(head);
			goto out_unlock;
		}
3723
		head->shared = is_shared;
C
Christoph Hellwig 已提交
3724
	} else {
3725
		ret = -EINVAL;
3726
		if (!is_shared || !head->shared) {
3727
			dev_err(ctrl->device,
3728 3729
				"Duplicate unshared namespace %d\n", nsid);
			goto out_put_ns_head;
3730
		}
3731
		if (!nvme_ns_ids_equal(&head->ids, ids)) {
C
Christoph Hellwig 已提交
3732 3733 3734
			dev_err(ctrl->device,
				"IDs don't match for shared namespace %d\n",
					nsid);
3735
			goto out_put_ns_head;
C
Christoph Hellwig 已提交
3736 3737 3738
		}
	}

3739
	list_add_tail_rcu(&ns->siblings, &head->list);
C
Christoph Hellwig 已提交
3740
	ns->head = head;
3741 3742
	mutex_unlock(&ctrl->subsys->lock);
	return 0;
C
Christoph Hellwig 已提交
3743

3744 3745
out_put_ns_head:
	nvme_put_ns_head(head);
C
Christoph Hellwig 已提交
3746 3747 3748 3749 3750
out_unlock:
	mutex_unlock(&ctrl->subsys->lock);
	return ret;
}

3751
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3752
{
3753
	struct nvme_ns *ns, *ret = NULL;
3754

3755
	down_read(&ctrl->namespaces_rwsem);
3756
	list_for_each_entry(ns, &ctrl->namespaces, list) {
C
Christoph Hellwig 已提交
3757
		if (ns->head->ns_id == nsid) {
K
Kanchan Joshi 已提交
3758
			if (!nvme_get_ns(ns))
3759
				continue;
3760 3761 3762
			ret = ns;
			break;
		}
C
Christoph Hellwig 已提交
3763
		if (ns->head->ns_id > nsid)
3764 3765
			break;
	}
3766
	up_read(&ctrl->namespaces_rwsem);
3767
	return ret;
3768
}
3769
EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);
3770

3771 3772 3773 3774 3775 3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786
/*
 * Add the namespace to the controller list while keeping the list ordered.
 */
static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
{
	struct nvme_ns *tmp;

	list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
		if (tmp->head->ns_id < ns->head->ns_id) {
			list_add(&ns->list, &tmp->list);
			return;
		}
	}
	list_add(&ns->list, &ns->ctrl->namespaces);
}

3787 3788
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
		struct nvme_ns_ids *ids)
3789 3790 3791
{
	struct nvme_ns *ns;
	struct gendisk *disk;
3792
	struct nvme_id_ns *id;
3793
	int node = ctrl->numa_node;
3794

3795
	if (nvme_identify_ns(ctrl, nsid, ids, &id))
3796 3797
		return;

3798 3799
	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
	if (!ns)
3800
		goto out_free_id;
3801

C
Christoph Hellwig 已提交
3802 3803
	disk = blk_mq_alloc_disk(ctrl->tagset, ns);
	if (IS_ERR(disk))
C
Christoph Hellwig 已提交
3804
		goto out_free_ns;
C
Christoph Hellwig 已提交
3805 3806 3807 3808 3809
	disk->fops = &nvme_bdev_ops;
	disk->private_data = ns;

	ns->disk = disk;
	ns->queue = disk->queue;
3810

3811
	if (ctrl->opts && ctrl->opts->data_digest)
3812
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
3813

3814
	blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
3815 3816 3817
	if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
		blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);

3818 3819 3820
	ns->ctrl = ctrl;
	kref_init(&ns->kref);

3821
	if (nvme_init_ns_head(ns, nsid, ids, id->nmic & NVME_NS_NMIC_SHARED))
C
Christoph Hellwig 已提交
3822
		goto out_cleanup_disk;
3823

3824 3825 3826 3827 3828 3829 3830 3831
	/*
	 * Without the multipath code enabled, multiple controller per
	 * subsystems are visible as devices and thus we cannot use the
	 * subsystem instance.
	 */
	if (!nvme_mpath_set_disk_name(ns, disk->disk_name, &disk->flags))
		sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,
			ns->head->instance);
3832

3833
	if (nvme_update_ns_info(ns, id))
C
Christoph Hellwig 已提交
3834
		goto out_unlink_ns;
3835

3836
	down_write(&ctrl->namespaces_rwsem);
3837
	nvme_ns_add_to_ctrl_list(ns);
3838
	up_write(&ctrl->namespaces_rwsem);
3839
	nvme_get_ctrl(ctrl);
3840

3841 3842 3843
	if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups))
		goto out_cleanup_ns_from_list;

3844 3845
	if (!nvme_ns_head_multipath(ns->head))
		nvme_add_ns_cdev(ns);
3846

C
Christoph Hellwig 已提交
3847
	nvme_mpath_add_disk(ns, id);
3848
	nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
C
Christoph Hellwig 已提交
3849 3850
	kfree(id);

3851
	return;
C
Christoph Hellwig 已提交
3852

3853 3854 3855 3856 3857
 out_cleanup_ns_from_list:
	nvme_put_ctrl(ctrl);
	down_write(&ctrl->namespaces_rwsem);
	list_del_init(&ns->list);
	up_write(&ctrl->namespaces_rwsem);
C
Christoph Hellwig 已提交
3858 3859 3860
 out_unlink_ns:
	mutex_lock(&ctrl->subsys->lock);
	list_del_rcu(&ns->siblings);
3861 3862
	if (list_empty(&ns->head->list))
		list_del_init(&ns->head->entry);
C
Christoph Hellwig 已提交
3863
	mutex_unlock(&ctrl->subsys->lock);
3864
	nvme_put_ns_head(ns->head);
C
Christoph Hellwig 已提交
3865 3866
 out_cleanup_disk:
	blk_cleanup_disk(disk);
3867 3868
 out_free_ns:
	kfree(ns);
3869 3870
 out_free_id:
	kfree(id);
3871 3872 3873 3874
}

static void nvme_ns_remove(struct nvme_ns *ns)
{
3875 3876
	bool last_path = false;

3877 3878
	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
		return;
3879

3880
	clear_bit(NVME_NS_READY, &ns->flags);
3881
	set_capacity(ns->disk, 0);
3882
	nvme_fault_inject_fini(&ns->fault_inject);
3883 3884 3885

	mutex_lock(&ns->ctrl->subsys->lock);
	list_del_rcu(&ns->siblings);
3886 3887 3888 3889
	if (list_empty(&ns->head->list)) {
		list_del_init(&ns->head->entry);
		last_path = true;
	}
3890
	mutex_unlock(&ns->ctrl->subsys->lock);
3891

3892 3893 3894 3895 3896 3897
	/* guarantee not available in head->list */
	synchronize_rcu();

	/* wait for concurrent submissions */
	if (nvme_mpath_clear_current_path(ns))
		synchronize_srcu(&ns->head->srcu);
3898

3899 3900 3901 3902
	if (!nvme_ns_head_multipath(ns->head))
		nvme_cdev_del(&ns->cdev, &ns->cdev_device);
	del_gendisk(ns->disk);
	blk_cleanup_queue(ns->queue);
3903

3904
	down_write(&ns->ctrl->namespaces_rwsem);
3905
	list_del_init(&ns->list);
3906
	up_write(&ns->ctrl->namespaces_rwsem);
3907

3908 3909
	if (last_path)
		nvme_mpath_shutdown_disk(ns->head);
3910 3911 3912
	nvme_put_ns(ns);
}

3913 3914 3915 3916 3917 3918 3919 3920 3921 3922
static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
{
	struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid);

	if (ns) {
		nvme_ns_remove(ns);
		nvme_put_ns(ns);
	}
}

3923
static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids)
C
Christoph Hellwig 已提交
3924 3925
{
	struct nvme_id_ns *id;
3926
	int ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
C
Christoph Hellwig 已提交
3927

3928 3929
	if (test_bit(NVME_NS_DEAD, &ns->flags))
		goto out;
C
Christoph Hellwig 已提交
3930

3931
	ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, ids, &id);
C
Christoph Hellwig 已提交
3932 3933 3934
	if (ret)
		goto out;

3935
	ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
C
Christoph Hellwig 已提交
3936
	if (!nvme_ns_ids_equal(&ns->head->ids, ids)) {
3937
		dev_err(ns->ctrl->device,
C
Christoph Hellwig 已提交
3938
			"identifiers changed for nsid %d\n", ns->head->ns_id);
3939
		goto out_free_id;
C
Christoph Hellwig 已提交
3940 3941 3942
	}

	ret = nvme_update_ns_info(ns, id);
3943 3944

out_free_id:
C
Christoph Hellwig 已提交
3945 3946 3947
	kfree(id);
out:
	/*
3948
	 * Only remove the namespace if we got a fatal error back from the
C
Christoph Hellwig 已提交
3949
	 * device, otherwise ignore the error and just move on.
3950 3951
	 *
	 * TODO: we should probably schedule a delayed retry here.
C
Christoph Hellwig 已提交
3952
	 */
3953
	if (ret > 0 && (ret & NVME_SC_DNR))
3954
		nvme_ns_remove(ns);
C
Christoph Hellwig 已提交
3955 3956
}

3957
static void nvme_validate_or_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3958
{
3959
	struct nvme_ns_ids ids = { };
3960 3961
	struct nvme_ns *ns;

3962 3963
	if (nvme_identify_ns_descs(ctrl, nsid, &ids))
		return;
3964

3965
	ns = nvme_find_get_ns(ctrl, nsid);
3966
	if (ns) {
3967
		nvme_validate_ns(ns, &ids);
3968
		nvme_put_ns(ns);
3969 3970 3971
		return;
	}

3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982
	switch (ids.csi) {
	case NVME_CSI_NVM:
		nvme_alloc_ns(ctrl, nsid, &ids);
		break;
	case NVME_CSI_ZNS:
		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			dev_warn(ctrl->device,
				"nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
				nsid);
			break;
		}
3983 3984 3985
		if (!nvme_multi_css(ctrl)) {
			dev_warn(ctrl->device,
				"command set not reported for nsid: %d\n",
3986
				nsid);
3987 3988
			break;
		}
3989 3990 3991 3992 3993 3994 3995
		nvme_alloc_ns(ctrl, nsid, &ids);
		break;
	default:
		dev_warn(ctrl->device, "unknown csi %u for nsid %u\n",
			ids.csi, nsid);
		break;
	}
3996 3997
}

3998 3999 4000 4001
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					unsigned nsid)
{
	struct nvme_ns *ns, *next;
4002
	LIST_HEAD(rm_list);
4003

4004
	down_write(&ctrl->namespaces_rwsem);
4005
	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
4006
		if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
4007
			list_move_tail(&ns->list, &rm_list);
4008
	}
4009
	up_write(&ctrl->namespaces_rwsem);
4010 4011 4012 4013

	list_for_each_entry_safe(ns, next, &rm_list, list)
		nvme_ns_remove(ns);

4014 4015
}

4016
static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
4017
{
4018
	const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32);
4019
	__le32 *ns_list;
4020 4021
	u32 prev = 0;
	int ret = 0, i;
4022

4023 4024
	if (nvme_ctrl_limited_cns(ctrl))
		return -EOPNOTSUPP;
4025

4026
	ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
4027 4028 4029
	if (!ns_list)
		return -ENOMEM;

4030
	for (;;) {
4031 4032 4033 4034 4035 4036 4037 4038
		struct nvme_command cmd = {
			.identify.opcode	= nvme_admin_identify,
			.identify.cns		= NVME_ID_CNS_NS_ACTIVE_LIST,
			.identify.nsid		= cpu_to_le32(prev),
		};

		ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list,
					    NVME_IDENTIFY_DATA_SIZE);
4039 4040 4041
		if (ret) {
			dev_warn(ctrl->device,
				"Identify NS List failed (status=0x%x)\n", ret);
4042
			goto free;
4043
		}
4044

4045
		for (i = 0; i < nr_entries; i++) {
4046
			u32 nsid = le32_to_cpu(ns_list[i]);
4047

4048 4049
			if (!nsid)	/* end of the list? */
				goto out;
4050
			nvme_validate_or_alloc_ns(ctrl, nsid);
4051 4052
			while (++prev < nsid)
				nvme_ns_remove_by_nsid(ctrl, prev);
4053 4054 4055
		}
	}
 out:
4056 4057
	nvme_remove_invalid_namespaces(ctrl, prev);
 free:
4058 4059 4060 4061
	kfree(ns_list);
	return ret;
}

4062
static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
4063
{
4064 4065 4066 4067 4068 4069 4070
	struct nvme_id_ctrl *id;
	u32 nn, i;

	if (nvme_identify_ctrl(ctrl, &id))
		return;
	nn = le32_to_cpu(id->nn);
	kfree(id);
4071

4072
	for (i = 1; i <= nn; i++)
4073
		nvme_validate_or_alloc_ns(ctrl, i);
4074

4075
	nvme_remove_invalid_namespaces(ctrl, nn);
4076 4077
}

4078
static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
4079 4080 4081
{
	size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
	__le32 *log;
4082
	int error;
4083 4084 4085

	log = kzalloc(log_size, GFP_KERNEL);
	if (!log)
4086
		return;
4087

4088 4089 4090 4091 4092 4093
	/*
	 * We need to read the log to clear the AEN, but we don't want to rely
	 * on it for the changed namespace information as userspace could have
	 * raced with us in reading the log page, which could cause us to miss
	 * updates.
	 */
4094 4095
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0,
			NVME_CSI_NVM, log, log_size, 0);
4096
	if (error)
4097 4098 4099 4100 4101 4102
		dev_warn(ctrl->device,
			"reading changed ns log failed: %d\n", error);

	kfree(log);
}

4103
static void nvme_scan_work(struct work_struct *work)
4104
{
4105 4106
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, scan_work);
4107

K
Keith Busch 已提交
4108 4109
	/* No tagset on a live ctrl means IO queues could not created */
	if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
4110 4111
		return;

D
Dan Carpenter 已提交
4112
	if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
4113
		dev_info(ctrl->device, "rescanning namespaces.\n");
4114
		nvme_clear_changed_ns_log(ctrl);
4115 4116
	}

4117
	mutex_lock(&ctrl->scan_lock);
4118 4119
	if (nvme_scan_ns_list(ctrl) != 0)
		nvme_scan_ns_sequential(ctrl);
4120
	mutex_unlock(&ctrl->scan_lock);
4121
}
4122

4123 4124 4125 4126 4127
/*
 * This function iterates the namespace list unlocked to allow recovery from
 * controller failure. It is up to the caller to ensure the namespace list is
 * not modified by scan work while this function is executing.
 */
4128 4129 4130
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns, *next;
4131
	LIST_HEAD(ns_list);
4132

4133 4134 4135 4136 4137 4138 4139
	/*
	 * make sure to requeue I/O to all namespaces as these
	 * might result from the scan itself and must complete
	 * for the scan_work to make progress
	 */
	nvme_mpath_clear_ctrl_paths(ctrl);

4140 4141 4142
	/* prevent racing with ns scanning */
	flush_work(&ctrl->scan_work);

4143 4144 4145 4146 4147 4148 4149 4150 4151
	/*
	 * The dead states indicates the controller was not gracefully
	 * disconnected. In that case, we won't be able to flush any data while
	 * removing the namespaces' disks; fail all the queues now to avoid
	 * potentially having to clean up the failed sync later.
	 */
	if (ctrl->state == NVME_CTRL_DEAD)
		nvme_kill_queues(ctrl);

4152 4153 4154
	/* this is a no-op when called from the controller reset handler */
	nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);

4155
	down_write(&ctrl->namespaces_rwsem);
4156
	list_splice_init(&ctrl->namespaces, &ns_list);
4157
	up_write(&ctrl->namespaces_rwsem);
4158 4159

	list_for_each_entry_safe(ns, next, &ns_list, list)
4160 4161
		nvme_ns_remove(ns);
}
4162
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
4163

4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186
static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct nvme_ctrl *ctrl =
		container_of(dev, struct nvme_ctrl, ctrl_device);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ret;

	ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name);
	if (ret)
		return ret;

	if (opts) {
		ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr);
		if (ret)
			return ret;

		ret = add_uevent_var(env, "NVME_TRSVCID=%s",
				opts->trsvcid ?: "none");
		if (ret)
			return ret;

		ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s",
				opts->host_traddr ?: "none");
4187 4188 4189 4190 4191
		if (ret)
			return ret;

		ret = add_uevent_var(env, "NVME_HOST_IFACE=%s",
				opts->host_iface ?: "none");
4192 4193 4194 4195
	}
	return ret;
}

4196 4197 4198 4199 4200 4201 4202
static void nvme_change_uevent(struct nvme_ctrl *ctrl, char *envdata)
{
	char *envp[2] = { envdata, NULL };

	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
}

4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218
static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
{
	char *envp[2] = { NULL, NULL };
	u32 aen_result = ctrl->aen_result;

	ctrl->aen_result = 0;
	if (!aen_result)
		return;

	envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
	if (!envp[0])
		return;
	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
	kfree(envp[0]);
}

4219 4220 4221 4222 4223
static void nvme_async_event_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, async_event_work);

4224
	nvme_aen_uevent(ctrl);
4225 4226 4227 4228 4229 4230 4231 4232

	/*
	 * The transport drivers must guarantee AER submission here is safe by
	 * flushing ctrl async_event_work after changing the controller state
	 * from LIVE and before freeing the admin queue.
	*/
	if (ctrl->state == NVME_CTRL_LIVE)
		ctrl->ops->submit_async_event(ctrl);
4233 4234
}

4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256
static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
{

	u32 csts;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
		return false;

	if (csts == ~0)
		return false;

	return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
}

static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
{
	struct nvme_fw_slot_info_log *log;

	log = kmalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return;

4257 4258
	if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM,
			log, sizeof(*log), 0))
4259
		dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
4260 4261 4262 4263 4264 4265 4266 4267 4268 4269 4270 4271 4272 4273 4274 4275 4276 4277 4278 4279 4280
	kfree(log);
}

static void nvme_fw_act_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work,
				struct nvme_ctrl, fw_act_work);
	unsigned long fw_act_timeout;

	if (ctrl->mtfa)
		fw_act_timeout = jiffies +
				msecs_to_jiffies(ctrl->mtfa * 100);
	else
		fw_act_timeout = jiffies +
				msecs_to_jiffies(admin_timeout * 1000);

	nvme_stop_queues(ctrl);
	while (nvme_ctrl_pp_status(ctrl)) {
		if (time_after(jiffies, fw_act_timeout)) {
			dev_warn(ctrl->device,
				"Fw activation timeout, reset controller\n");
4281 4282
			nvme_try_sched_reset(ctrl);
			return;
4283 4284 4285 4286
		}
		msleep(100);
	}

4287
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
4288 4289 4290
		return;

	nvme_start_queues(ctrl);
4291
	/* read FW slot information to clear the AER */
4292 4293 4294
	nvme_get_fw_slot_info(ctrl);
}

4295 4296
static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
4297 4298
	u32 aer_notice_type = (result & 0xff00) >> 8;

4299 4300
	trace_nvme_async_event(ctrl, aer_notice_type);

4301
	switch (aer_notice_type) {
4302
	case NVME_AER_NOTICE_NS_CHANGED:
D
Dan Carpenter 已提交
4303
		set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
4304 4305 4306
		nvme_queue_scan(ctrl);
		break;
	case NVME_AER_NOTICE_FW_ACT_STARTING:
4307 4308 4309 4310 4311 4312 4313
		/*
		 * We are (ab)using the RESETTING state to prevent subsequent
		 * recovery actions from interfering with the controller's
		 * firmware activation.
		 */
		if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
			queue_work(nvme_wq, &ctrl->fw_act_work);
4314
		break;
C
Christoph Hellwig 已提交
4315 4316 4317 4318 4319 4320 4321
#ifdef CONFIG_NVME_MULTIPATH
	case NVME_AER_NOTICE_ANA:
		if (!ctrl->ana_log_buf)
			break;
		queue_work(nvme_wq, &ctrl->ana_work);
		break;
#endif
4322 4323 4324
	case NVME_AER_NOTICE_DISC_CHANGED:
		ctrl->aen_result = result;
		break;
4325 4326 4327 4328 4329
	default:
		dev_warn(ctrl->device, "async event result %08x\n", result);
	}
}

4330
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
4331
		volatile union nvme_result *res)
4332
{
4333
	u32 result = le32_to_cpu(res->u32);
4334
	u32 aer_type = result & 0x07;
4335

4336
	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
4337 4338
		return;

4339
	switch (aer_type) {
4340 4341 4342
	case NVME_AER_NOTICE:
		nvme_handle_aen_notice(ctrl, result);
		break;
4343 4344 4345 4346
	case NVME_AER_ERROR:
	case NVME_AER_SMART:
	case NVME_AER_CSS:
	case NVME_AER_VS:
4347
		trace_nvme_async_event(ctrl, aer_type);
4348
		ctrl->aen_result = result;
4349 4350 4351
		break;
	default:
		break;
4352
	}
4353
	queue_work(nvme_wq, &ctrl->async_event_work);
4354 4355
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);
4356

4357
void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
4358
{
C
Christoph Hellwig 已提交
4359
	nvme_mpath_stop(ctrl);
4360
	nvme_stop_keep_alive(ctrl);
4361
	nvme_stop_failfast_work(ctrl);
4362
	flush_work(&ctrl->async_event_work);
4363
	cancel_work_sync(&ctrl->fw_act_work);
4364 4365 4366 4367 4368
}
EXPORT_SYMBOL_GPL(nvme_stop_ctrl);

void nvme_start_ctrl(struct nvme_ctrl *ctrl)
{
4369
	nvme_start_keep_alive(ctrl);
4370

4371 4372
	nvme_enable_aen(ctrl);

4373 4374 4375 4376
	if (ctrl->queue_count > 1) {
		nvme_queue_scan(ctrl);
		nvme_start_queues(ctrl);
	}
4377 4378

	nvme_change_uevent(ctrl, "NVME_EVENT=connected");
4379 4380
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);
4381

4382 4383
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
4384
	nvme_hwmon_exit(ctrl);
4385
	nvme_fault_inject_fini(&ctrl->fault_inject);
4386
	dev_pm_qos_hide_latency_tolerance(ctrl->device);
4387
	cdev_device_del(&ctrl->cdev, ctrl->device);
4388
	nvme_put_ctrl(ctrl);
4389
}
4390
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
4391

4392 4393 4394 4395 4396
static void nvme_free_cels(struct nvme_ctrl *ctrl)
{
	struct nvme_effects_log	*cel;
	unsigned long i;

4397
	xa_for_each(&ctrl->cels, i, cel) {
4398 4399 4400 4401 4402 4403 4404
		xa_erase(&ctrl->cels, i);
		kfree(cel);
	}

	xa_destroy(&ctrl->cels);
}

4405
static void nvme_free_ctrl(struct device *dev)
4406
{
4407 4408
	struct nvme_ctrl *ctrl =
		container_of(dev, struct nvme_ctrl, ctrl_device);
C
Christoph Hellwig 已提交
4409
	struct nvme_subsystem *subsys = ctrl->subsys;
4410

K
Keith Busch 已提交
4411
	if (!subsys || ctrl->instance != subsys->instance)
4412
		ida_free(&nvme_instance_ida, ctrl->instance);
4413

4414
	nvme_free_cels(ctrl);
C
Christoph Hellwig 已提交
4415
	nvme_mpath_uninit(ctrl);
S
Sagi Grimberg 已提交
4416
	__free_page(ctrl->discard_page);
4417

C
Christoph Hellwig 已提交
4418
	if (subsys) {
4419
		mutex_lock(&nvme_subsystems_lock);
C
Christoph Hellwig 已提交
4420 4421
		list_del(&ctrl->subsys_entry);
		sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
4422
		mutex_unlock(&nvme_subsystems_lock);
C
Christoph Hellwig 已提交
4423
	}
4424 4425 4426

	ctrl->ops->free_ctrl(ctrl);

C
Christoph Hellwig 已提交
4427 4428
	if (subsys)
		nvme_put_subsystem(subsys);
4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440
}

/*
 * Initialize a NVMe controller structures.  This needs to be called during
 * earliest initialization so that we have the initialized structured around
 * during probing.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

4441
	ctrl->state = NVME_CTRL_NEW;
4442
	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
4443
	spin_lock_init(&ctrl->lock);
4444
	mutex_init(&ctrl->scan_lock);
4445
	INIT_LIST_HEAD(&ctrl->namespaces);
4446
	xa_init(&ctrl->cels);
4447
	init_rwsem(&ctrl->namespaces_rwsem);
4448 4449 4450
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;
4451
	ctrl->numa_node = NUMA_NO_NODE;
4452
	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
4453
	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
4454
	INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
4455
	INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
4456
	init_waitqueue_head(&ctrl->state_wq);
4457

4458
	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
4459
	INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work);
4460 4461 4462
	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;

4463 4464 4465 4466 4467 4468 4469 4470
	BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
			PAGE_SIZE);
	ctrl->discard_page = alloc_page(GFP_KERNEL);
	if (!ctrl->discard_page) {
		ret = -ENOMEM;
		goto out;
	}

4471
	ret = ida_alloc(&nvme_instance_ida, GFP_KERNEL);
4472
	if (ret < 0)
4473
		goto out;
4474
	ctrl->instance = ret;
4475

4476 4477
	device_initialize(&ctrl->ctrl_device);
	ctrl->device = &ctrl->ctrl_device;
4478 4479
	ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt),
			ctrl->instance);
4480 4481 4482 4483 4484 4485 4486
	ctrl->device->class = nvme_class;
	ctrl->device->parent = ctrl->dev;
	ctrl->device->groups = nvme_dev_attr_groups;
	ctrl->device->release = nvme_free_ctrl;
	dev_set_drvdata(ctrl->device, ctrl);
	ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
	if (ret)
4487 4488
		goto out_release_instance;

4489
	nvme_get_ctrl(ctrl);
4490 4491 4492
	cdev_init(&ctrl->cdev, &nvme_dev_fops);
	ctrl->cdev.owner = ops->module;
	ret = cdev_device_add(&ctrl->cdev, ctrl->device);
4493 4494
	if (ret)
		goto out_free_name;
4495

4496 4497 4498 4499 4500 4501 4502 4503
	/*
	 * Initialize latency tolerance controls.  The sysfs files won't
	 * be visible to userspace unless the device actually supports APST.
	 */
	ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
		min(default_ps_max_latency_us, (unsigned long)S32_MAX));

4504
	nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
4505
	nvme_mpath_init_ctrl(ctrl);
4506

4507
	return 0;
4508
out_free_name:
4509
	nvme_put_ctrl(ctrl);
4510
	kfree_const(ctrl->device->kobj.name);
4511
out_release_instance:
4512
	ida_free(&nvme_instance_ida, ctrl->instance);
4513
out:
4514 4515
	if (ctrl->discard_page)
		__free_page(ctrl->discard_page);
4516 4517
	return ret;
}
4518
EXPORT_SYMBOL_GPL(nvme_init_ctrl);
4519

4520 4521
static void nvme_start_ns_queue(struct nvme_ns *ns)
{
M
Ming Lei 已提交
4522 4523
	if (test_and_clear_bit(NVME_NS_STOPPED, &ns->flags))
		blk_mq_unquiesce_queue(ns->queue);
4524 4525 4526 4527
}

static void nvme_stop_ns_queue(struct nvme_ns *ns)
{
M
Ming Lei 已提交
4528 4529
	if (!test_and_set_bit(NVME_NS_STOPPED, &ns->flags))
		blk_mq_quiesce_queue(ns->queue);
M
Ming Lei 已提交
4530 4531
	else
		blk_mq_wait_quiesce_done(ns->queue);
4532 4533 4534 4535 4536 4537 4538 4539 4540 4541 4542 4543 4544 4545 4546
}

/*
 * Prepare a queue for teardown.
 *
 * This must forcibly unquiesce queues to avoid blocking dispatch, and only set
 * the capacity to 0 after that to avoid blocking dispatchers that may be
 * holding bd_butex.  This will end buffered writers dirtying pages that can't
 * be synced.
 */
static void nvme_set_queue_dying(struct nvme_ns *ns)
{
	if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
		return;

4547
	blk_mark_disk_dead(ns->disk);
4548 4549 4550 4551 4552
	nvme_start_ns_queue(ns);

	set_capacity_and_notify(ns->disk, 0);
}

4553 4554 4555 4556 4557 4558 4559 4560 4561 4562 4563
/**
 * nvme_kill_queues(): Ends all namespace queues
 * @ctrl: the dead controller that needs to end
 *
 * Call this function when the driver determines it is unable to get the
 * controller in a state capable of servicing IO.
 */
void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

4564
	down_read(&ctrl->namespaces_rwsem);
M
Ming Lei 已提交
4565

4566
	/* Forcibly unquiesce queues to avoid blocking dispatch */
I
Igor Konopko 已提交
4567
	if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
4568
		nvme_start_admin_queue(ctrl);
4569

4570 4571
	list_for_each_entry(ns, &ctrl->namespaces, list)
		nvme_set_queue_dying(ns);
4572

4573
	up_read(&ctrl->namespaces_rwsem);
4574
}
4575
EXPORT_SYMBOL_GPL(nvme_kill_queues);
4576

K
Keith Busch 已提交
4577 4578 4579 4580
void nvme_unfreeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

4581
	down_read(&ctrl->namespaces_rwsem);
K
Keith Busch 已提交
4582 4583
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unfreeze_queue(ns->queue);
4584
	up_read(&ctrl->namespaces_rwsem);
K
Keith Busch 已提交
4585 4586 4587
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);

4588
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
K
Keith Busch 已提交
4589 4590 4591
{
	struct nvme_ns *ns;

4592
	down_read(&ctrl->namespaces_rwsem);
K
Keith Busch 已提交
4593 4594 4595 4596 4597
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
		if (timeout <= 0)
			break;
	}
4598
	up_read(&ctrl->namespaces_rwsem);
4599
	return timeout;
K
Keith Busch 已提交
4600 4601 4602 4603 4604 4605 4606
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);

void nvme_wait_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

4607
	down_read(&ctrl->namespaces_rwsem);
K
Keith Busch 已提交
4608 4609
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_freeze_queue_wait(ns->queue);
4610
	up_read(&ctrl->namespaces_rwsem);
K
Keith Busch 已提交
4611 4612 4613 4614 4615 4616 4617
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze);

void nvme_start_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

4618
	down_read(&ctrl->namespaces_rwsem);
K
Keith Busch 已提交
4619
	list_for_each_entry(ns, &ctrl->namespaces, list)
4620
		blk_freeze_queue_start(ns->queue);
4621
	up_read(&ctrl->namespaces_rwsem);
K
Keith Busch 已提交
4622 4623 4624
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);

4625
void nvme_stop_queues(struct nvme_ctrl *ctrl)
4626 4627 4628
{
	struct nvme_ns *ns;

4629
	down_read(&ctrl->namespaces_rwsem);
4630
	list_for_each_entry(ns, &ctrl->namespaces, list)
4631
		nvme_stop_ns_queue(ns);
4632
	up_read(&ctrl->namespaces_rwsem);
4633
}
4634
EXPORT_SYMBOL_GPL(nvme_stop_queues);
4635

4636
void nvme_start_queues(struct nvme_ctrl *ctrl)
4637 4638 4639
{
	struct nvme_ns *ns;

4640
	down_read(&ctrl->namespaces_rwsem);
4641
	list_for_each_entry(ns, &ctrl->namespaces, list)
4642
		nvme_start_ns_queue(ns);
4643
	up_read(&ctrl->namespaces_rwsem);
4644
}
4645
EXPORT_SYMBOL_GPL(nvme_start_queues);
4646

4647 4648
void nvme_stop_admin_queue(struct nvme_ctrl *ctrl)
{
M
Ming Lei 已提交
4649 4650
	if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
		blk_mq_quiesce_queue(ctrl->admin_q);
M
Ming Lei 已提交
4651 4652
	else
		blk_mq_wait_quiesce_done(ctrl->admin_q);
4653 4654 4655 4656 4657
}
EXPORT_SYMBOL_GPL(nvme_stop_admin_queue);

void nvme_start_admin_queue(struct nvme_ctrl *ctrl)
{
M
Ming Lei 已提交
4658 4659
	if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
		blk_mq_unquiesce_queue(ctrl->admin_q);
4660 4661 4662
}
EXPORT_SYMBOL_GPL(nvme_start_admin_queue);

C
Chao Leng 已提交
4663
void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
K
Keith Busch 已提交
4664 4665 4666 4667 4668 4669 4670
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_sync_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
C
Chao Leng 已提交
4671 4672
}
EXPORT_SYMBOL_GPL(nvme_sync_io_queues);
4673

C
Chao Leng 已提交
4674 4675 4676
void nvme_sync_queues(struct nvme_ctrl *ctrl)
{
	nvme_sync_io_queues(ctrl);
4677 4678
	if (ctrl->admin_q)
		blk_sync_queue(ctrl->admin_q);
K
Keith Busch 已提交
4679 4680 4681
}
EXPORT_SYMBOL_GPL(nvme_sync_queues);

4682
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file)
4683
{
4684 4685 4686
	if (file->f_op != &nvme_dev_fops)
		return NULL;
	return file->private_data;
4687
}
4688
EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, NVME_TARGET_PASSTHRU);
4689

4690 4691 4692 4693 4694 4695 4696 4697 4698 4699 4700 4701 4702 4703 4704 4705 4706 4707
/*
 * Check we didn't inadvertently grow the command structure sizes:
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
K
Keith Busch 已提交
4708 4709
	BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE);
4710
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE);
4711 4712 4713 4714 4715 4716 4717
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
}


4718
static int __init nvme_core_init(void)
4719
{
4720
	int result = -ENOMEM;
4721

4722 4723
	_nvme_check_size();

4724 4725 4726
	nvme_wq = alloc_workqueue("nvme-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_wq)
4727 4728 4729 4730 4731 4732 4733 4734 4735 4736 4737
		goto out;

	nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_reset_wq)
		goto destroy_wq;

	nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_delete_wq)
		goto destroy_reset_wq;
4738

4739 4740
	result = alloc_chrdev_region(&nvme_ctrl_base_chr_devt, 0,
			NVME_MINORS, "nvme");
4741
	if (result < 0)
4742
		goto destroy_delete_wq;
4743 4744 4745 4746 4747 4748

	nvme_class = class_create(THIS_MODULE, "nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}
4749
	nvme_class->dev_uevent = nvme_class_uevent;
4750

C
Christoph Hellwig 已提交
4751 4752 4753 4754 4755
	nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem");
	if (IS_ERR(nvme_subsys_class)) {
		result = PTR_ERR(nvme_subsys_class);
		goto destroy_class;
	}
4756 4757 4758 4759 4760 4761 4762 4763 4764 4765 4766 4767

	result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS,
				     "nvme-generic");
	if (result < 0)
		goto destroy_subsys_class;

	nvme_ns_chr_class = class_create(THIS_MODULE, "nvme-generic");
	if (IS_ERR(nvme_ns_chr_class)) {
		result = PTR_ERR(nvme_ns_chr_class);
		goto unregister_generic_ns;
	}

4768
	return 0;
4769

4770 4771 4772 4773
unregister_generic_ns:
	unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
destroy_subsys_class:
	class_destroy(nvme_subsys_class);
C
Christoph Hellwig 已提交
4774 4775
destroy_class:
	class_destroy(nvme_class);
4776
unregister_chrdev:
4777
	unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
4778 4779 4780 4781
destroy_delete_wq:
	destroy_workqueue(nvme_delete_wq);
destroy_reset_wq:
	destroy_workqueue(nvme_reset_wq);
4782 4783
destroy_wq:
	destroy_workqueue(nvme_wq);
4784
out:
4785
	return result;
4786 4787
}

4788
static void __exit nvme_core_exit(void)
4789
{
4790
	class_destroy(nvme_ns_chr_class);
C
Christoph Hellwig 已提交
4791
	class_destroy(nvme_subsys_class);
4792
	class_destroy(nvme_class);
4793
	unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
4794
	unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
4795 4796
	destroy_workqueue(nvme_delete_wq);
	destroy_workqueue(nvme_reset_wq);
4797
	destroy_workqueue(nvme_wq);
4798
	ida_destroy(&nvme_ns_chr_minor_ida);
M
Max Gurtovoy 已提交
4799
	ida_destroy(&nvme_instance_ida);
4800
}
4801 4802 4803 4804 4805

MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_core_init);
module_exit(nvme_core_exit);