core.c 122.5 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0
2 3 4 5 6 7 8
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
9
#include <linux/compat.h>
10
#include <linux/delay.h>
11
#include <linux/errno.h>
12
#include <linux/hdreg.h>
13
#include <linux/kernel.h>
14
#include <linux/module.h>
15
#include <linux/backing-dev.h>
16
#include <linux/list_sort.h>
17 18
#include <linux/slab.h>
#include <linux/types.h>
19 20 21
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
22
#include <linux/pm_qos.h>
23
#include <asm/unaligned.h>
24 25

#include "nvme.h"
S
Sagi Grimberg 已提交
26
#include "fabrics.h"
27

H
Hannes Reinecke 已提交
28 29 30
#define CREATE_TRACE_POINTS
#include "trace.h"

31 32
#define NVME_MINORS		(1U << MINORBITS)

33 34
unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
35
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
36
EXPORT_SYMBOL_GPL(admin_timeout);
37

38 39
unsigned int nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
40
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
41
EXPORT_SYMBOL_GPL(nvme_io_timeout);
42

43
static unsigned char shutdown_timeout = 5;
44 45 46
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

47 48
static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
K
Keith Busch 已提交
49
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
50

51
static unsigned long default_ps_max_latency_us = 100000;
52 53 54 55
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

56 57 58 59
static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

60 61 62 63
static bool streams;
module_param(streams, bool, 0644);
MODULE_PARM_DESC(streams, "turn on support for Streams write directives");

64 65 66 67 68
/*
 * nvme_wq - hosts nvme related works that are not reset or delete
 * nvme_reset_wq - hosts nvme reset works
 * nvme_delete_wq - hosts nvme delete works
 *
69 70
 * nvme_wq will host works such as scan, aen handling, fw activation,
 * keep-alive, periodic reconnects etc. nvme_reset_wq
71 72 73 74
 * runs reset works which also flush works hosted on nvme_wq for
 * serialization purposes. nvme_delete_wq host controller deletion
 * works which flush reset works for serialization.
 */
75 76 77
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

78 79 80 81 82 83
struct workqueue_struct *nvme_reset_wq;
EXPORT_SYMBOL_GPL(nvme_reset_wq);

struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);

C
Christoph Hellwig 已提交
84 85
static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);
86

87
static DEFINE_IDA(nvme_instance_ida);
88
static dev_t nvme_ctrl_base_chr_devt;
89
static struct class *nvme_class;
C
Christoph Hellwig 已提交
90
static struct class *nvme_subsys_class;
91

92
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
93 94 95
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					   unsigned nsid);

96 97 98 99 100 101 102 103
/*
 * Prepare a queue for teardown.
 *
 * This must forcibly unquiesce queues to avoid blocking dispatch, and only set
 * the capacity to 0 after that to avoid blocking dispatchers that may be
 * holding bd_butex.  This will end buffered writers dirtying pages that can't
 * be synced.
 */
104 105
static void nvme_set_queue_dying(struct nvme_ns *ns)
{
C
Christoph Hellwig 已提交
106
	if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
107
		return;
108

109 110
	blk_set_queue_dying(ns->queue);
	blk_mq_unquiesce_queue(ns->queue);
111

112
	set_capacity_and_notify(ns->disk, 0);
113
}
114

115 116 117 118 119
static void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Only new queue scan work when admin and IO queues are both alive
	 */
K
Keith Busch 已提交
120
	if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
121 122 123
		queue_work(nvme_wq, &ctrl->scan_work);
}

124 125 126 127 128 129
/*
 * Use this function to proceed with scheduling reset_work for a controller
 * that had previously been set to the resetting state. This is intended for
 * code paths that can't be interrupted by other reset attempts. A hot removal
 * may prevent this from succeeding.
 */
130
int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
131 132 133 134 135 136 137
{
	if (ctrl->state != NVME_CTRL_RESETTING)
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
138
EXPORT_SYMBOL_GPL(nvme_try_sched_reset);
139

140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171
static void nvme_failfast_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, failfast_work);

	if (ctrl->state != NVME_CTRL_CONNECTING)
		return;

	set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
	dev_info(ctrl->device, "failfast expired\n");
	nvme_kick_requeue_lists(ctrl);
}

static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1)
		return;

	schedule_delayed_work(&ctrl->failfast_work,
			      ctrl->opts->fast_io_fail_tmo * HZ);
}

static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts)
		return;

	cancel_delayed_work_sync(&ctrl->failfast_work);
	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
}


172 173 174 175
int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
176
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
177 178 179 180 181
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);

182
static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
183 184 185 186
{
	int ret;

	ret = nvme_reset_ctrl(ctrl);
187
	if (!ret) {
188
		flush_work(&ctrl->reset_work);
K
Keith Busch 已提交
189
		if (ctrl->state != NVME_CTRL_LIVE)
190 191 192
			ret = -ENETRESET;
	}

193 194 195
	return ret;
}

196
static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
197
{
198 199 200
	dev_info(ctrl->device,
		 "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);

201
	flush_work(&ctrl->reset_work);
202 203
	nvme_stop_ctrl(ctrl);
	nvme_remove_namespaces(ctrl);
204
	ctrl->ops->delete_ctrl(ctrl);
205
	nvme_uninit_ctrl(ctrl);
206 207
}

208 209 210 211 212 213 214 215
static void nvme_delete_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, delete_work);

	nvme_do_delete_ctrl(ctrl);
}

216 217 218 219
int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		return -EBUSY;
220
	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
221 222 223 224 225
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);

226
static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
227 228
{
	/*
229 230
	 * Keep a reference until nvme_do_delete_ctrl() complete,
	 * since ->delete_ctrl can free the controller.
231 232
	 */
	nvme_get_ctrl(ctrl);
233
	if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
234
		nvme_do_delete_ctrl(ctrl);
235 236 237
	nvme_put_ctrl(ctrl);
}

238
static blk_status_t nvme_error_status(u16 status)
239
{
240
	switch (status & 0x7ff) {
241
	case NVME_SC_SUCCESS:
242
		return BLK_STS_OK;
243
	case NVME_SC_CAP_EXCEEDED:
244
		return BLK_STS_NOSPC;
245
	case NVME_SC_LBA_RANGE:
246 247
	case NVME_SC_CMD_INTERRUPTED:
	case NVME_SC_NS_NOT_READY:
248 249
		return BLK_STS_TARGET;
	case NVME_SC_BAD_ATTRIBUTES:
250
	case NVME_SC_ONCS_NOT_SUPPORTED:
251 252 253
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
254
		return BLK_STS_NOTSUPP;
255 256 257
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
258 259
	case NVME_SC_ACCESS_DENIED:
	case NVME_SC_READ_ONLY:
260
	case NVME_SC_COMPARE_FAILED:
261
		return BLK_STS_MEDIUM;
262 263 264 265 266 267 268
	case NVME_SC_GUARD_CHECK:
	case NVME_SC_APPTAG_CHECK:
	case NVME_SC_REFTAG_CHECK:
	case NVME_SC_INVALID_PI:
		return BLK_STS_PROTECTION;
	case NVME_SC_RESERVATION_CONFLICT:
		return BLK_STS_NEXUS;
269 270
	case NVME_SC_HOST_PATH_ERROR:
		return BLK_STS_TRANSPORT;
K
Keith Busch 已提交
271 272 273 274
	case NVME_SC_ZONE_TOO_MANY_ACTIVE:
		return BLK_STS_ZONE_ACTIVE_RESOURCE;
	case NVME_SC_ZONE_TOO_MANY_OPEN:
		return BLK_STS_ZONE_OPEN_RESOURCE;
275 276
	default:
		return BLK_STS_IOERR;
277 278 279
	}
}

280 281 282 283 284 285 286
static void nvme_retry_req(struct request *req)
{
	unsigned long delay = 0;
	u16 crd;

	/* The mask and shift result must be <= 3 */
	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
287 288
	if (crd)
		delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;
289 290 291 292 293 294

	nvme_req(req)->retries++;
	blk_mq_requeue_request(req, false);
	blk_mq_delay_kick_requeue_list(req->q, delay);
}

295 296 297 298 299 300 301
enum nvme_disposition {
	COMPLETE,
	RETRY,
	FAILOVER,
};

static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
302
{
303 304
	if (likely(nvme_req(req)->status == 0))
		return COMPLETE;
305

306 307 308 309
	if (blk_noretry_request(req) ||
	    (nvme_req(req)->status & NVME_SC_DNR) ||
	    nvme_req(req)->retries >= nvme_max_retries)
		return COMPLETE;
310

311
	if (req->cmd_flags & REQ_NVME_MPATH) {
312 313
		if (nvme_is_path_error(nvme_req(req)->status) ||
		    blk_queue_dying(req->q))
314
			return FAILOVER;
315 316 317
	} else {
		if (blk_queue_dying(req->q))
			return COMPLETE;
318
	}
319

320 321
	return RETRY;
}
322

323 324 325
static inline void nvme_end_req(struct request *req)
{
	blk_status_t status = nvme_error_status(nvme_req(req)->status);
326

327 328
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    req_op(req) == REQ_OP_ZONE_APPEND)
K
Keith Busch 已提交
329 330
		req->__sector = nvme_lba_to_sect(req->q->queuedata,
			le64_to_cpu(nvme_req(req)->result.u64));
H
Hannes Reinecke 已提交
331

332
	nvme_trace_bio_complete(req);
333
	blk_mq_end_request(req, status);
334
}
335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355

void nvme_complete_rq(struct request *req)
{
	trace_nvme_complete_rq(req);
	nvme_cleanup_cmd(req);

	if (nvme_req(req)->ctrl->kas)
		nvme_req(req)->ctrl->comp_seen = true;

	switch (nvme_decide_disposition(req)) {
	case COMPLETE:
		nvme_end_req(req);
		return;
	case RETRY:
		nvme_retry_req(req);
		return;
	case FAILOVER:
		nvme_failover_req(req);
		return;
	}
}
356 357
EXPORT_SYMBOL_GPL(nvme_complete_rq);

358 359 360 361 362 363 364 365 366 367 368 369 370 371 372
/*
 * Called to unwind from ->queue_rq on a failed command submission so that the
 * multipathing code gets called to potentially failover to another path.
 * The caller needs to unwind all transport specific resource allocations and
 * must return propagate the return value.
 */
blk_status_t nvme_host_path_error(struct request *req)
{
	nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
	blk_mq_set_request_complete(req);
	nvme_complete_rq(req);
	return BLK_STS_OK;
}
EXPORT_SYMBOL_GPL(nvme_host_path_error);

373
bool nvme_cancel_request(struct request *req, void *data, bool reserved)
374 375 376 377
{
	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

378 379 380 381
	/* don't abort one completed request */
	if (blk_mq_request_completed(req))
		return true;

382
	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
383
	nvme_req(req)->flags |= NVME_REQ_CANCELLED;
384
	blk_mq_complete_request(req);
385
	return true;
386 387 388
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);

C
Chao Leng 已提交
389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408
void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->tagset) {
		blk_mq_tagset_busy_iter(ctrl->tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_tagset);

void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->admin_tagset) {
		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);

409 410 411
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
412
	enum nvme_ctrl_state old_state;
413
	unsigned long flags;
414 415
	bool changed = false;

416
	spin_lock_irqsave(&ctrl->lock, flags);
417 418

	old_state = ctrl->state;
419 420 421
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
422
		case NVME_CTRL_NEW:
423
		case NVME_CTRL_RESETTING:
424
		case NVME_CTRL_CONNECTING:
425
			changed = true;
426
			fallthrough;
427 428 429 430 431 432 433
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
434 435
		case NVME_CTRL_LIVE:
			changed = true;
436
			fallthrough;
437 438 439 440
		default:
			break;
		}
		break;
441
	case NVME_CTRL_CONNECTING:
442
		switch (old_state) {
443
		case NVME_CTRL_NEW:
444
		case NVME_CTRL_RESETTING:
445
			changed = true;
446
			fallthrough;
447 448 449 450 451 452 453 454
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
455
		case NVME_CTRL_CONNECTING:
456
			changed = true;
457
			fallthrough;
458 459 460 461
		default:
			break;
		}
		break;
462 463 464 465 466
	case NVME_CTRL_DELETING_NOIO:
		switch (old_state) {
		case NVME_CTRL_DELETING:
		case NVME_CTRL_DEAD:
			changed = true;
467
			fallthrough;
468 469 470 471
		default:
			break;
		}
		break;
472 473 474 475
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
476
			fallthrough;
477 478 479 480
		default:
			break;
		}
		break;
481 482 483 484
	default:
		break;
	}

485
	if (changed) {
486
		ctrl->state = new_state;
487 488
		wake_up_all(&ctrl->state_wq);
	}
489

490
	spin_unlock_irqrestore(&ctrl->lock, flags);
491 492 493 494 495 496
	if (!changed)
		return false;

	if (ctrl->state == NVME_CTRL_LIVE) {
		if (old_state == NVME_CTRL_CONNECTING)
			nvme_stop_failfast_work(ctrl);
497
		nvme_kick_requeue_lists(ctrl);
498 499 500 501
	} else if (ctrl->state == NVME_CTRL_CONNECTING &&
		old_state == NVME_CTRL_RESETTING) {
		nvme_start_failfast_work(ctrl);
	}
502 503 504 505
	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);

506 507 508 509 510 511 512 513 514 515 516 517
/*
 * Returns true for sink states that can't ever transition back to live.
 */
static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
{
	switch (ctrl->state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	case NVME_CTRL_DELETING:
518
	case NVME_CTRL_DELETING_NOIO:
519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539
	case NVME_CTRL_DEAD:
		return true;
	default:
		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
		return true;
	}
}

/*
 * Waits for the controller state to be resetting, or returns false if it is
 * not possible to ever transition to that state.
 */
bool nvme_wait_reset(struct nvme_ctrl *ctrl)
{
	wait_event(ctrl->state_wq,
		   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
		   nvme_state_terminal(ctrl));
	return ctrl->state == NVME_CTRL_RESETTING;
}
EXPORT_SYMBOL_GPL(nvme_wait_reset);

C
Christoph Hellwig 已提交
540 541 542 543 544
static void nvme_free_ns_head(struct kref *ref)
{
	struct nvme_ns_head *head =
		container_of(ref, struct nvme_ns_head, ref);

545
	nvme_mpath_remove_disk(head);
C
Christoph Hellwig 已提交
546
	ida_simple_remove(&head->subsys->ns_ida, head->instance);
547
	cleanup_srcu_struct(&head->srcu);
548
	nvme_put_subsystem(head->subsys);
C
Christoph Hellwig 已提交
549 550 551 552 553 554 555 556
	kfree(head);
}

static void nvme_put_ns_head(struct nvme_ns_head *head)
{
	kref_put(&head->ref, nvme_free_ns_head);
}

557 558 559 560
static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

561 562
	if (ns->ndev)
		nvme_nvm_unregister(ns);
563 564

	put_disk(ns->disk);
C
Christoph Hellwig 已提交
565
	nvme_put_ns_head(ns->head);
566
	nvme_put_ctrl(ns->ctrl);
567 568 569
	kfree(ns);
}

570
void nvme_put_ns(struct nvme_ns *ns)
571 572 573
{
	kref_put(&ns->kref, nvme_free_ns);
}
574
EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);
575

576 577 578 579 580 581 582 583 584
static inline void nvme_clear_nvme_request(struct request *req)
{
	if (!(req->rq_flags & RQF_DONTPREP)) {
		nvme_req(req)->retries = 0;
		nvme_req(req)->flags = 0;
		req->rq_flags |= RQF_DONTPREP;
	}
}

585
static inline unsigned int nvme_req_op(struct nvme_command *cmd)
586
{
587 588
	return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
}
589

590 591 592
static inline void nvme_init_request(struct request *req,
		struct nvme_command *cmd)
{
593 594 595
	if (req->q->queuedata)
		req->timeout = NVME_IO_TIMEOUT;
	else /* no queuedata implies admin queue */
596
		req->timeout = NVME_ADMIN_TIMEOUT;
597 598

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
599
	nvme_clear_nvme_request(req);
600
	nvme_req(req)->cmd = cmd;
601
}
602

603 604 605 606 607 608 609 610
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags)
{
	struct request *req;

	req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
	if (!IS_ERR(req))
		nvme_init_request(req, cmd);
611 612
	return req;
}
613
EXPORT_SYMBOL_GPL(nvme_alloc_request);
614

615
static struct request *nvme_alloc_request_qid(struct request_queue *q,
616 617 618 619 620 621 622 623 624 625 626
		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
	struct request *req;

	req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
			qid ? qid - 1 : 0);
	if (!IS_ERR(req))
		nvme_init_request(req, cmd);
	return req;
}

627 628 629 630 631 632 633
static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));

	c.directive.opcode = nvme_admin_directive_send;
A
Arnav Dawn 已提交
634
	c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662
	c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
	c.directive.dtype = NVME_DIR_IDENTIFY;
	c.directive.tdtype = NVME_DIR_STREAMS;
	c.directive.endir = enable ? NVME_DIR_ENDIR : 0;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
}

static int nvme_disable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, false);
}

static int nvme_enable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, true);
}

static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
				  struct streams_directive_params *s, u32 nsid)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	memset(s, 0, sizeof(*s));

	c.directive.opcode = nvme_admin_directive_recv;
	c.directive.nsid = cpu_to_le32(nsid);
K
Keith Busch 已提交
663
	c.directive.numd = cpu_to_le32(nvme_bytes_to_numd(sizeof(*s)));
664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683
	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
	c.directive.dtype = NVME_DIR_STREAMS;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
}

static int nvme_configure_directives(struct nvme_ctrl *ctrl)
{
	struct streams_directive_params s;
	int ret;

	if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
		return 0;
	if (!streams)
		return 0;

	ret = nvme_enable_streams(ctrl);
	if (ret)
		return ret;

A
Arnav Dawn 已提交
684
	ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
685
	if (ret)
686
		goto out_disable_stream;
687 688 689 690 691

	ctrl->nssa = le16_to_cpu(s.nssa);
	if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
		dev_info(ctrl->device, "too few streams (%u) available\n",
					ctrl->nssa);
692
		goto out_disable_stream;
693 694
	}

695
	ctrl->nr_streams = min_t(u16, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
696 697
	dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
	return 0;
698 699 700 701

out_disable_stream:
	nvme_disable_streams(ctrl);
	return ret;
702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728
}

/*
 * Check if 'req' has a write hint associated with it. If it does, assign
 * a valid namespace stream to the write.
 */
static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
				     struct request *req, u16 *control,
				     u32 *dsmgmt)
{
	enum rw_hint streamid = req->write_hint;

	if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
		streamid = 0;
	else {
		streamid--;
		if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
			return;

		*control |= NVME_RW_DTYPE_STREAMS;
		*dsmgmt |= streamid << 16;
	}

	if (streamid < ARRAY_SIZE(req->q->write_hints))
		req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
}

729 730 731 732 733 734 735 736
static void nvme_setup_passthrough(struct request *req,
		struct nvme_command *cmd)
{
	memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
	/* passthru commands should let the driver set the SGL flags */
	cmd->common.flags &= ~NVME_CMD_SGL_ALL;
}

M
Ming Lin 已提交
737 738 739 740
static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	cmnd->common.opcode = nvme_cmd_flush;
C
Christoph Hellwig 已提交
741
	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
M
Ming Lin 已提交
742 743
}

744
static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
M
Ming Lin 已提交
745 746
		struct nvme_command *cmnd)
{
747
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
M
Ming Lin 已提交
748
	struct nvme_dsm_range *range;
749
	struct bio *bio;
M
Ming Lin 已提交
750

751 752 753 754 755 756 757 758
	/*
	 * Some devices do not consider the DSM 'Number of Ranges' field when
	 * determining how much data to DMA. Always allocate memory for maximum
	 * number of segments to prevent device reading beyond end of buffer.
	 */
	static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;

	range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
759 760 761 762 763 764 765 766 767 768 769
	if (!range) {
		/*
		 * If we fail allocation our range, fallback to the controller
		 * discard page. If that's also busy, it's safe to return
		 * busy, as we know we can make progress once that's freed.
		 */
		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
			return BLK_STS_RESOURCE;

		range = page_address(ns->ctrl->discard_page);
	}
M
Ming Lin 已提交
770

771
	__rq_for_each_bio(bio, req) {
772
		u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
773 774
		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

K
Keith Busch 已提交
775 776 777 778 779
		if (n < segments) {
			range[n].cattr = cpu_to_le32(0);
			range[n].nlb = cpu_to_le32(nlb);
			range[n].slba = cpu_to_le64(slba);
		}
780 781 782 783
		n++;
	}

	if (WARN_ON_ONCE(n != segments)) {
784 785 786 787
		if (virt_to_page(range) == ns->ctrl->discard_page)
			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
		else
			kfree(range);
788
		return BLK_STS_IOERR;
789
	}
M
Ming Lin 已提交
790 791

	cmnd->dsm.opcode = nvme_cmd_dsm;
C
Christoph Hellwig 已提交
792
	cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
793
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
M
Ming Lin 已提交
794 795
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

796 797
	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
798
	req->special_vec.bv_len = alloc_size;
799
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;
M
Ming Lin 已提交
800

801
	return BLK_STS_OK;
M
Ming Lin 已提交
802 803
}

804 805 806 807 808 809 810 811 812
static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		return nvme_setup_discard(ns, req, cmnd);

	cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
	cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->write_zeroes.slba =
813
		cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
814 815 816 817 818 819
	cmnd->write_zeroes.length =
		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
	cmnd->write_zeroes.control = 0;
	return BLK_STS_OK;
}

820
static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
K
Keith Busch 已提交
821 822
		struct request *req, struct nvme_command *cmnd,
		enum nvme_opcode op)
M
Ming Lin 已提交
823
{
824
	struct nvme_ctrl *ctrl = ns->ctrl;
M
Ming Lin 已提交
825 826 827 828 829 830 831 832 833 834 835
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

K
Keith Busch 已提交
836
	cmnd->rw.opcode = op;
C
Christoph Hellwig 已提交
837
	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
838
	cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
M
Ming Lin 已提交
839 840
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

841 842 843
	if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
		nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);

M
Ming Lin 已提交
844
	if (ns->ms) {
845 846 847 848 849 850 851 852 853 854 855 856
		/*
		 * If formated with metadata, the block layer always provides a
		 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.  Else
		 * we enable the PRACT bit for protection information or set the
		 * namespace capacity to zero to prevent any I/O.
		 */
		if (!blk_integrity_rq(req)) {
			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
				return BLK_STS_NOTSUPP;
			control |= NVME_RW_PRINFO_PRACT;
		}

M
Ming Lin 已提交
857 858 859 860 861 862 863 864
		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
K
Keith Busch 已提交
865 866
			if (op == nvme_cmd_zone_append)
				control |= NVME_RW_APPEND_PIREMAP;
867
			cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
M
Ming Lin 已提交
868 869 870 871 872 873
			break;
		}
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
874
	return 0;
M
Ming Lin 已提交
875 876
}

877 878 879
void nvme_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
M
Minwoo Im 已提交
880
		struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
881 882
		struct page *page = req->special_vec.bv_page;

M
Minwoo Im 已提交
883 884
		if (page == ctrl->discard_page)
			clear_bit_unlock(0, &ctrl->discard_page_busy);
885 886
		else
			kfree(page_address(page) + req->special_vec.bv_offset);
887 888 889 890
	}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);

891
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
M
Ming Lin 已提交
892 893
		struct nvme_command *cmd)
{
894
	blk_status_t ret = BLK_STS_OK;
M
Ming Lin 已提交
895

896
	nvme_clear_nvme_request(req);
897

898
	memset(cmd, 0, sizeof(*cmd));
899 900 901
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
902
		nvme_setup_passthrough(req, cmd);
903 904
		break;
	case REQ_OP_FLUSH:
M
Ming Lin 已提交
905
		nvme_setup_flush(ns, cmd);
906
		break;
K
Keith Busch 已提交
907 908 909 910 911 912 913 914 915 916 917 918 919
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_RESET:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
		break;
920
	case REQ_OP_WRITE_ZEROES:
921 922
		ret = nvme_setup_write_zeroes(ns, req, cmd);
		break;
923
	case REQ_OP_DISCARD:
M
Ming Lin 已提交
924
		ret = nvme_setup_discard(ns, req, cmd);
925 926
		break;
	case REQ_OP_READ:
K
Keith Busch 已提交
927 928
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
		break;
929
	case REQ_OP_WRITE:
K
Keith Busch 已提交
930 931 932 933
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write);
		break;
	case REQ_OP_ZONE_APPEND:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
934 935 936
		break;
	default:
		WARN_ON_ONCE(1);
937
		return BLK_STS_IOERR;
938
	}
M
Ming Lin 已提交
939

940
	cmd->common.command_id = req->tag;
K
Keith Busch 已提交
941
	trace_nvme_setup_cmd(req, cmd);
M
Ming Lin 已提交
942 943 944 945
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962
static void nvme_end_sync_rq(struct request *rq, blk_status_t error)
{
	struct completion *waiting = rq->end_io_data;

	rq->end_io_data = NULL;
	complete(waiting);
}

static void nvme_execute_rq_polled(struct request_queue *q,
		struct gendisk *bd_disk, struct request *rq, int at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	WARN_ON_ONCE(!test_bit(QUEUE_FLAG_POLL, &q->queue_flags));

	rq->cmd_flags |= REQ_HIPRI;
	rq->end_io_data = &wait;
963
	blk_execute_rq_nowait(bd_disk, rq, at_head, nvme_end_sync_rq);
964 965 966 967 968 969 970

	while (!completion_done(&wait)) {
		blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true);
		cond_resched();
	}
}

971 972 973 974 975
/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
976
		union nvme_result *result, void *buffer, unsigned bufflen,
977
		unsigned timeout, int qid, int at_head,
978
		blk_mq_req_flags_t flags, bool poll)
979 980 981 982
{
	struct request *req;
	int ret;

983 984 985 986
	if (qid == NVME_QID_ANY)
		req = nvme_alloc_request(q, cmd, flags);
	else
		req = nvme_alloc_request_qid(q, cmd, flags, qid);
987 988 989
	if (IS_ERR(req))
		return PTR_ERR(req);

990 991
	if (timeout)
		req->timeout = timeout;
992

993 994 995 996
	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
997 998
	}

999 1000 1001
	if (poll)
		nvme_execute_rq_polled(req->q, NULL, req, at_head);
	else
1002
		blk_execute_rq(NULL, req, at_head);
1003 1004
	if (result)
		*result = nvme_req(req)->result;
1005 1006 1007 1008
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
1009 1010 1011 1012
 out:
	blk_mq_free_request(req);
	return ret;
}
1013
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
1014 1015 1016 1017

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
1018
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
1019
			NVME_QID_ANY, 0, 0, false);
1020
}
1021
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
1022

1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056
static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
		unsigned len, u32 seed, bool write)
{
	struct bio_integrity_payload *bip;
	int ret = -ENOMEM;
	void *buf;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		goto out;

	ret = -EFAULT;
	if (write && copy_from_user(buf, ubuf, len))
		goto out_free_meta;

	bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
	if (IS_ERR(bip)) {
		ret = PTR_ERR(bip);
		goto out_free_meta;
	}

	bip->bip_iter.bi_size = len;
	bip->bip_iter.bi_sector = seed;
	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
			offset_in_page(buf));
	if (ret == len)
		return buf;
	ret = -ENOMEM;
out_free_meta:
	kfree(buf);
out:
	return ERR_PTR(ret);
}

1057 1058 1059 1060
static u32 nvme_known_admin_effects(u8 opcode)
{
	switch (opcode) {
	case nvme_admin_format_nvm:
1061
		return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_NCC |
1062 1063
			NVME_CMD_EFFECTS_CSE_MASK;
	case nvme_admin_sanitize_nvm:
1064
		return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK;
1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101
	default:
		break;
	}
	return 0;
}

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
{
	u32 effects = 0;

	if (ns) {
		if (ns->head->effects)
			effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
			dev_warn(ctrl->device,
				 "IO command:%02x has unhandled effects:%08x\n",
				 opcode, effects);
		return 0;
	}

	if (ctrl->effects)
		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
	effects |= nvme_known_admin_effects(opcode);

	return effects;
}
EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU);

static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			       u8 opcode)
{
	u32 effects = nvme_command_effects(ctrl, ns, opcode);

	/*
	 * For simplicity, IO to all namespaces is quiesced even if the command
	 * effects say only one namespace is affected.
	 */
1102
	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114
		mutex_lock(&ctrl->scan_lock);
		mutex_lock(&ctrl->subsys->lock);
		nvme_mpath_start_freeze(ctrl->subsys);
		nvme_mpath_wait_freeze(ctrl->subsys);
		nvme_start_freeze(ctrl);
		nvme_wait_freeze(ctrl);
	}
	return effects;
}

static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
{
1115
	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129
		nvme_unfreeze(ctrl);
		nvme_mpath_unfreeze(ctrl->subsys);
		mutex_unlock(&ctrl->subsys->lock);
		nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
		mutex_unlock(&ctrl->scan_lock);
	}
	if (effects & NVME_CMD_EFFECTS_CCC)
		nvme_init_identify(ctrl);
	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
		nvme_queue_scan(ctrl);
		flush_work(&ctrl->scan_work);
	}
}

1130 1131 1132 1133 1134 1135 1136 1137 1138
void nvme_execute_passthru_rq(struct request *rq)
{
	struct nvme_command *cmd = nvme_req(rq)->cmd;
	struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
	struct nvme_ns *ns = rq->q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	u32 effects;

	effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
1139
	blk_execute_rq(disk, rq, 0);
1140 1141
	if (effects) /* nothing to be done for zero cmd effects */
		nvme_passthru_end(ctrl, effects);
1142 1143 1144
}
EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);

1145
static int nvme_submit_user_cmd(struct request_queue *q,
1146 1147
		struct nvme_command *cmd, void __user *ubuffer,
		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
1148
		u32 meta_seed, u64 *result, unsigned timeout)
1149
{
1150
	bool write = nvme_is_write(cmd);
1151
	struct nvme_ns *ns = q->queuedata;
1152
	struct block_device *bdev = ns ? ns->disk->part0 : NULL;
1153
	struct request *req;
1154 1155
	struct bio *bio = NULL;
	void *meta = NULL;
1156 1157
	int ret;

1158
	req = nvme_alloc_request(q, cmd, 0);
1159 1160 1161
	if (IS_ERR(req))
		return PTR_ERR(req);

1162 1163
	if (timeout)
		req->timeout = timeout;
1164
	nvme_req(req)->flags |= NVME_REQ_USERCMD;
1165 1166

	if (ubuffer && bufflen) {
1167 1168 1169 1170 1171
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;
1172 1173
		if (bdev)
			bio_set_dev(bio, bdev);
1174
		if (bdev && meta_buffer && meta_len) {
1175 1176 1177 1178
			meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
					meta_seed, write);
			if (IS_ERR(meta)) {
				ret = PTR_ERR(meta);
1179 1180
				goto out_unmap;
			}
1181
			req->cmd_flags |= REQ_INTEGRITY;
1182 1183
		}
	}
1184

1185
	nvme_execute_passthru_rq(req);
1186 1187 1188 1189
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
1190
	if (result)
1191
		*result = le64_to_cpu(nvme_req(req)->result.u64);
1192 1193 1194 1195 1196 1197
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
	kfree(meta);
 out_unmap:
1198
	if (bio)
1199
		blk_rq_unmap_user(bio);
1200 1201 1202 1203 1204
 out:
	blk_mq_free_request(req);
	return ret;
}

1205
static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
S
Sagi Grimberg 已提交
1206 1207
{
	struct nvme_ctrl *ctrl = rq->end_io_data;
1208 1209
	unsigned long flags;
	bool startka = false;
S
Sagi Grimberg 已提交
1210 1211 1212

	blk_mq_free_request(rq);

1213
	if (status) {
S
Sagi Grimberg 已提交
1214
		dev_err(ctrl->device,
1215 1216
			"failed nvme_keep_alive_end_io error=%d\n",
				status);
S
Sagi Grimberg 已提交
1217 1218 1219
		return;
	}

1220
	ctrl->comp_seen = false;
1221 1222 1223 1224 1225 1226
	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->state == NVME_CTRL_LIVE ||
	    ctrl->state == NVME_CTRL_CONNECTING)
		startka = true;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (startka)
1227
		queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
S
Sagi Grimberg 已提交
1228 1229 1230 1231 1232 1233
}

static int nvme_keep_alive(struct nvme_ctrl *ctrl)
{
	struct request *rq;

1234 1235
	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd,
			BLK_MQ_REQ_RESERVED);
S
Sagi Grimberg 已提交
1236 1237 1238 1239 1240 1241
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->timeout = ctrl->kato * HZ;
	rq->end_io_data = ctrl;

1242
	blk_execute_rq_nowait(NULL, rq, 0, nvme_keep_alive_end_io);
S
Sagi Grimberg 已提交
1243 1244 1245 1246 1247 1248 1249 1250

	return 0;
}

static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);
1251 1252 1253 1254 1255 1256
	bool comp_seen = ctrl->comp_seen;

	if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
		dev_dbg(ctrl->device,
			"reschedule traffic based keep-alive timer\n");
		ctrl->comp_seen = false;
1257
		queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
1258 1259
		return;
	}
S
Sagi Grimberg 已提交
1260 1261 1262 1263

	if (nvme_keep_alive(ctrl)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed\n");
1264
		nvme_reset_ctrl(ctrl);
S
Sagi Grimberg 已提交
1265 1266 1267 1268
		return;
	}
}

1269
static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
S
Sagi Grimberg 已提交
1270 1271 1272 1273
{
	if (unlikely(ctrl->kato == 0))
		return;

1274
	queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
S
Sagi Grimberg 已提交
1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285
}

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);

1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298
/*
 * In NVMe 1.0 the CNS field was just a binary controller or namespace
 * flag, thus sending any new CNS opcodes has a big chance of not working.
 * Qemu unfortunately had that bug after reporting a 1.1 version compliance
 * (but not for any later version).
 */
static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
{
	if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
		return ctrl->vs < NVME_VS(1, 2, 0);
	return ctrl->vs < NVME_VS(1, 1, 0);
}

K
Keith Busch 已提交
1299
static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
1300 1301 1302 1303 1304 1305
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
1306
	c.identify.cns = NVME_ID_CNS_CTRL;
1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}

1319 1320 1321 1322 1323
static bool nvme_multi_css(struct nvme_ctrl *ctrl)
{
	return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
}

1324
static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
1325
		struct nvme_ns_id_desc *cur, bool *csi_seen)
1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354
{
	const char *warn_str = "ctrl returned bogus length:";
	void *data = cur;

	switch (cur->nidt) {
	case NVME_NIDT_EUI64:
		if (cur->nidl != NVME_NIDT_EUI64_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
				 warn_str, cur->nidl);
			return -1;
		}
		memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
		return NVME_NIDT_EUI64_LEN;
	case NVME_NIDT_NGUID:
		if (cur->nidl != NVME_NIDT_NGUID_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
				 warn_str, cur->nidl);
			return -1;
		}
		memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
		return NVME_NIDT_NGUID_LEN;
	case NVME_NIDT_UUID:
		if (cur->nidl != NVME_NIDT_UUID_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
				 warn_str, cur->nidl);
			return -1;
		}
		uuid_copy(&ids->uuid, data + sizeof(*cur));
		return NVME_NIDT_UUID_LEN;
1355 1356 1357 1358 1359 1360 1361 1362 1363
	case NVME_NIDT_CSI:
		if (cur->nidl != NVME_NIDT_CSI_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n",
				 warn_str, cur->nidl);
			return -1;
		}
		memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN);
		*csi_seen = true;
		return NVME_NIDT_CSI_LEN;
1364 1365 1366 1367 1368 1369
	default:
		/* Skip unknown types */
		return cur->nidl;
	}
}

1370
static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
1371
		struct nvme_ns_ids *ids)
1372 1373
{
	struct nvme_command c = { };
1374 1375
	bool csi_seen = false;
	int status, pos, len;
1376 1377
	void *data;

1378 1379
	if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl))
		return 0;
1380 1381 1382
	if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
		return 0;

1383 1384 1385 1386 1387 1388 1389 1390
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

1391
	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
1392
				      NVME_IDENTIFY_DATA_SIZE);
1393 1394
	if (status) {
		dev_warn(ctrl->device,
1395 1396
			"Identify Descriptors failed (nsid=%u, status=0x%x)\n",
			nsid, status);
1397
		goto free_data;
1398
	}
1399 1400 1401 1402 1403 1404 1405

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;

1406
		len = nvme_process_ns_desc(ctrl, ids, cur, &csi_seen);
1407
		if (len < 0)
1408
			break;
1409 1410 1411

		len += sizeof(*cur);
	}
1412 1413 1414 1415 1416 1417 1418

	if (nvme_multi_css(ctrl) && !csi_seen) {
		dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
			 nsid);
		status = -EINVAL;
	}

1419 1420 1421 1422 1423
free_data:
	kfree(data);
	return status;
}

1424 1425
static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
			struct nvme_ns_ids *ids, struct nvme_id_ns **id)
1426 1427 1428 1429 1430
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1431 1432
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
1433
	c.identify.cns = NVME_ID_CNS_NS;
1434

1435 1436 1437
	*id = kmalloc(sizeof(**id), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;
1438

1439
	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
1440
	if (error) {
1441
		dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
1442
		goto out_free_id;
1443 1444
	}

1445
	error = NVME_SC_INVALID_NS | NVME_SC_DNR;
1446 1447
	if ((*id)->ncap == 0) /* namespace not allocated or attached */
		goto out_free_id;
1448 1449 1450 1451 1452 1453 1454 1455

	if (ctrl->vs >= NVME_VS(1, 1, 0) &&
	    !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
		memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
	if (ctrl->vs >= NVME_VS(1, 2, 0) &&
	    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
		memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));

1456 1457 1458 1459
	return 0;

out_free_id:
	kfree(*id);
1460
	return error;
1461 1462
}

K
Keith Busch 已提交
1463 1464
static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen, u32 *result)
1465
{
1466
	union nvme_result res = { 0 };
1467
	struct nvme_command c;
1468
	int ret;
1469 1470

	memset(&c, 0, sizeof(c));
K
Keith Busch 已提交
1471
	c.features.opcode = op;
1472 1473 1474
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

1475
	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
1476
			buffer, buflen, 0, NVME_QID_ANY, 0, 0, false);
1477
	if (ret >= 0 && result)
1478
		*result = le32_to_cpu(res.u32);
1479
	return ret;
1480 1481
}

K
Keith Busch 已提交
1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_set_features);

int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_get_features);

C
Christoph Hellwig 已提交
1500 1501 1502 1503 1504 1505
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

1506
	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
C
Christoph Hellwig 已提交
1507
			&result);
1508
	if (status < 0)
C
Christoph Hellwig 已提交
1509 1510
		return status;

1511 1512 1513 1514 1515 1516
	/*
	 * Degraded controllers might return an error when setting the queue
	 * count.  We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be only way to fix them up.
	 */
	if (status > 0) {
1517
		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
1518 1519 1520 1521 1522 1523
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

C
Christoph Hellwig 已提交
1524 1525
	return 0;
}
1526
EXPORT_SYMBOL_GPL(nvme_set_queue_count);
C
Christoph Hellwig 已提交
1527

1528
#define NVME_AEN_SUPPORTED \
1529 1530
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
	 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)
1531 1532 1533

static void nvme_enable_aen(struct nvme_ctrl *ctrl)
{
1534
	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
1535 1536
	int status;

1537 1538 1539 1540 1541
	if (!supported_aens)
		return;

	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
			NULL, 0, &result);
1542 1543
	if (status)
		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
1544
			 supported_aens);
1545 1546

	queue_work(nvme_wq, &ctrl->async_event_work);
1547 1548
}

1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560
/*
 * Convert integer values from ioctl structures to user pointers, silently
 * ignoring the upper bits in the compat case to match behaviour of 32-bit
 * kernels.
 */
static void __user *nvme_to_user_ptr(uintptr_t ptrval)
{
	if (in_compat_syscall())
		ptrval = (compat_uptr_t)ptrval;
	return (void __user *)ptrval;
}

1561 1562 1563 1564 1565 1566 1567 1568 1569
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
1570 1571
	if (io.flags)
		return -EINVAL;
1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;
1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597

	if ((io.control & NVME_RW_PRINFO_PRACT) &&
	    ns->ms == sizeof(struct t10_pi_tuple)) {
		/*
		 * Protection information is stripped/inserted by the
		 * controller.
		 */
		if (nvme_to_user_ptr(io.metadata))
			return -EINVAL;
		meta_len = 0;
		metadata = NULL;
	} else {
		meta_len = (io.nblocks + 1) * ns->ms;
		metadata = nvme_to_user_ptr(io.metadata);
	}
1598

1599
	if (ns->features & NVME_NS_EXT_LBAS) {
1600 1601 1602 1603 1604 1605 1606 1607 1608 1609
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
C
Christoph Hellwig 已提交
1610
	c.rw.nsid = cpu_to_le32(ns->head->ns_id);
1611 1612 1613 1614 1615 1616 1617 1618
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

1619
	return nvme_submit_user_cmd(ns->queue, &c,
1620
			nvme_to_user_ptr(io.addr), length,
1621
			metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
1622 1623
}

1624
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1625 1626 1627 1628 1629
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656
	u64 result;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
1657 1658 1659
			nvme_to_user_ptr(cmd.addr), cmd.data_len,
			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
			0, &result, timeout);
1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674

	if (status >= 0) {
		if (put_user(result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd64 __user *ucmd)
{
	struct nvme_passthru_cmd64 cmd;
	struct nvme_command c;
	unsigned timeout = 0;
1675 1676 1677 1678 1679 1680
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
1681 1682
	if (cmd.flags)
		return -EINVAL;
1683 1684 1685 1686 1687 1688 1689

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
1690 1691 1692 1693 1694 1695
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);
1696 1697 1698 1699 1700

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
1701 1702
			nvme_to_user_ptr(cmd.addr), cmd.data_len,
			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
1703
			0, &cmd.result, timeout);
1704

1705 1706 1707 1708 1709 1710 1711 1712
	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

1713 1714 1715 1716
/*
 * Issue ioctl requests on the first available path.  Note that unlike normal
 * block layer requests we will not retry failed request on another controller.
 */
K
Keith Busch 已提交
1717
struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
1718
		struct nvme_ns_head **head, int *srcu_idx)
1719
{
1720 1721
#ifdef CONFIG_NVME_MULTIPATH
	if (disk->fops == &nvme_ns_head_ops) {
1722 1723
		struct nvme_ns *ns;

1724 1725
		*head = disk->private_data;
		*srcu_idx = srcu_read_lock(&(*head)->srcu);
1726 1727 1728 1729
		ns = nvme_find_path(*head);
		if (!ns)
			srcu_read_unlock(&(*head)->srcu, *srcu_idx);
		return ns;
1730 1731 1732 1733 1734 1735
	}
#endif
	*head = NULL;
	*srcu_idx = -1;
	return disk->private_data;
}
1736

K
Keith Busch 已提交
1737
void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
1738 1739 1740 1741
{
	if (head)
		srcu_read_unlock(&head->srcu, idx);
}
1742

1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777
static bool is_ctrl_ioctl(unsigned int cmd)
{
	if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
		return true;
	if (is_sed_ioctl(cmd))
		return true;
	return false;
}

static int nvme_handle_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
				  void __user *argp,
				  struct nvme_ns_head *head,
				  int srcu_idx)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	int ret;

	nvme_get_ctrl(ns->ctrl);
	nvme_put_ns_from_disk(head, srcu_idx);

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		ret = nvme_user_cmd(ctrl, NULL, argp);
		break;
	case NVME_IOCTL_ADMIN64_CMD:
		ret = nvme_user_cmd64(ctrl, NULL, argp);
		break;
	default:
		ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
		break;
	}
	nvme_put_ctrl(ctrl);
	return ret;
}

1778 1779
static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
1780
{
1781
	struct nvme_ns_head *head = NULL;
1782
	void __user *argp = (void __user *)arg;
1783 1784 1785 1786 1787
	struct nvme_ns *ns;
	int srcu_idx, ret;

	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
	if (unlikely(!ns))
1788 1789
		return -EWOULDBLOCK;

1790 1791 1792 1793 1794
	/*
	 * Handle ioctls that apply to the controller instead of the namespace
	 * seperately and drop the ns SRCU reference early.  This avoids a
	 * deadlock when deleting namespaces using the passthrough interface.
	 */
1795 1796
	if (is_ctrl_ioctl(cmd))
		return nvme_handle_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
1797

1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808
	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		ret = ns->head->ns_id;
		break;
	case NVME_IOCTL_IO_CMD:
		ret = nvme_user_cmd(ns->ctrl, ns, argp);
		break;
	case NVME_IOCTL_SUBMIT_IO:
		ret = nvme_submit_io(ns, argp);
		break;
1809 1810 1811
	case NVME_IOCTL_IO64_CMD:
		ret = nvme_user_cmd64(ns->ctrl, ns, argp);
		break;
1812 1813 1814 1815 1816 1817 1818
	default:
		if (ns->ndev)
			ret = nvme_nvm_ioctl(ns, cmd, arg);
		else
			ret = -ENOTTY;
	}

1819 1820
	nvme_put_ns_from_disk(head, srcu_idx);
	return ret;
1821 1822
}

1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863
#ifdef CONFIG_COMPAT
struct nvme_user_io32 {
	__u8	opcode;
	__u8	flags;
	__u16	control;
	__u16	nblocks;
	__u16	rsvd;
	__u64	metadata;
	__u64	addr;
	__u64	slba;
	__u32	dsmgmt;
	__u32	reftag;
	__u16	apptag;
	__u16	appmask;
} __attribute__((__packed__));

#define NVME_IOCTL_SUBMIT_IO32	_IOW('N', 0x42, struct nvme_user_io32)

static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	/*
	 * Corresponds to the difference of NVME_IOCTL_SUBMIT_IO
	 * between 32 bit programs and 64 bit kernel.
	 * The cause is that the results of sizeof(struct nvme_user_io),
	 * which is used to define NVME_IOCTL_SUBMIT_IO,
	 * are not same between 32 bit compiler and 64 bit compiler.
	 * NVME_IOCTL_SUBMIT_IO32 is for 64 bit kernel handling
	 * NVME_IOCTL_SUBMIT_IO issued from 32 bit programs.
	 * Other IOCTL numbers are same between 32 bit and 64 bit.
	 * So there is nothing to do regarding to other IOCTL numbers.
	 */
	if (cmd == NVME_IOCTL_SUBMIT_IO32)
		return nvme_ioctl(bdev, mode, NVME_IOCTL_SUBMIT_IO, arg);

	return nvme_ioctl(bdev, mode, cmd, arg);
}
#else
#define nvme_compat_ioctl	NULL
#endif /* CONFIG_COMPAT */

1864 1865
static int nvme_open(struct block_device *bdev, fmode_t mode)
{
C
Christoph Hellwig 已提交
1866 1867
	struct nvme_ns *ns = bdev->bd_disk->private_data;

1868 1869 1870
#ifdef CONFIG_NVME_MULTIPATH
	/* should never be called due to GENHD_FL_HIDDEN */
	if (WARN_ON_ONCE(ns->head->disk))
1871
		goto fail;
1872
#endif
C
Christoph Hellwig 已提交
1873
	if (!kref_get_unless_zero(&ns->kref))
1874 1875 1876 1877
		goto fail;
	if (!try_module_get(ns->ctrl->ops->module))
		goto fail_put_ns;

C
Christoph Hellwig 已提交
1878
	return 0;
1879 1880 1881 1882 1883

fail_put_ns:
	nvme_put_ns(ns);
fail:
	return -ENXIO;
1884 1885 1886 1887
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
1888 1889 1890 1891
	struct nvme_ns *ns = disk->private_data;

	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903
}

static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
1904 1905
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
				u32 max_integrity_segments)
1906 1907 1908
{
	struct blk_integrity integrity;

1909
	memset(&integrity, 0, sizeof(integrity));
1910
	switch (pi_type) {
1911 1912
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
1913 1914
		integrity.tag_size = sizeof(u16) + sizeof(u32);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1915 1916 1917 1918
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
1919 1920
		integrity.tag_size = sizeof(u16);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1921 1922 1923 1924 1925
		break;
	default:
		integrity.profile = NULL;
		break;
	}
1926 1927
	integrity.tuple_size = ms;
	blk_integrity_register(disk, &integrity);
1928
	blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
1929 1930
}
#else
1931 1932
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
				u32 max_integrity_segments)
1933 1934 1935 1936
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

1937
static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
1938
{
1939
	struct nvme_ctrl *ctrl = ns->ctrl;
1940
	struct request_queue *queue = disk->queue;
1941 1942
	u32 size = queue_logical_block_size(queue);

1943 1944 1945 1946 1947 1948 1949
	if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) {
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
		return;
	}

	if (ctrl->nr_streams && ns->sws && ns->sgs)
		size *= ns->sws * ns->sgs;
1950

1951 1952 1953
	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
			NVME_DSM_MAX_RANGES);

1954
	queue->limits.discard_alignment = 0;
1955
	queue->limits.discard_granularity = size;
1956

1957 1958 1959 1960
	/* If discard is already enabled, don't reset queue limits */
	if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
		return;

1961 1962
	blk_queue_max_discard_sectors(queue, UINT_MAX);
	blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);
1963 1964

	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
1965
		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
1966 1967
}

1968
static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
1969
{
1970
	u64 max_blocks;
1971

1972 1973
	if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) ||
	    (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985
		return;
	/*
	 * Even though NVMe spec explicitly states that MDTS is not
	 * applicable to the write-zeroes:- "The restriction does not apply to
	 * commands that do not transfer data between the host and the
	 * controller (e.g., Write Uncorrectable ro Write Zeroes command).".
	 * In order to be more cautious use controller's max_hw_sectors value
	 * to configure the maximum sectors for the write-zeroes which is
	 * configured based on the controller's MDTS field in the
	 * nvme_init_identify() if available.
	 */
	if (ns->ctrl->max_hw_sectors == UINT_MAX)
1986
		max_blocks = (u64)USHRT_MAX + 1;
1987
	else
1988
		max_blocks = ns->ctrl->max_hw_sectors + 1;
1989

1990 1991
	blk_queue_max_write_zeroes_sectors(disk->queue,
					   nvme_lba_to_sect(ns, max_blocks));
1992 1993
}

C
Christoph Hellwig 已提交
1994 1995 1996 1997 1998 1999 2000
static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
{
	return !uuid_is_null(&ids->uuid) ||
		memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
		memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
}

2001 2002 2003 2004
static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{
	return uuid_equal(&a->uuid, &b->uuid) &&
		memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
2005 2006
		memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 &&
		a->csi == b->csi;
2007 2008
}

K
Keith Busch 已提交
2009 2010
static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
				 u32 *phys_bs, u32 *io_opt)
2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025
{
	struct streams_directive_params s;
	int ret;

	if (!ctrl->nr_streams)
		return 0;

	ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id);
	if (ret)
		return ret;

	ns->sws = le32_to_cpu(s.sws);
	ns->sgs = le16_to_cpu(s.sgs);

	if (ns->sws) {
K
Keith Busch 已提交
2026
		*phys_bs = ns->sws * (1 << ns->lba_shift);
2027
		if (ns->sgs)
K
Keith Busch 已提交
2028
			*io_opt = *phys_bs * ns->sgs;
2029 2030 2031 2032 2033
	}

	return 0;
}

2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077
static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	struct nvme_ctrl *ctrl = ns->ctrl;

	/*
	 * The PI implementation requires the metadata size to be equal to the
	 * t10 pi tuple size.
	 */
	ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
	if (ns->ms == sizeof(struct t10_pi_tuple))
		ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
	else
		ns->pi_type = 0;

	ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
	if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
		return 0;
	if (ctrl->ops->flags & NVME_F_FABRICS) {
		/*
		 * The NVMe over Fabrics specification only supports metadata as
		 * part of the extended data LBA.  We rely on HCA/HBA support to
		 * remap the separate metadata buffer from the block layer.
		 */
		if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
			return -EINVAL;
		if (ctrl->max_integrity_segments)
			ns->features |=
				(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
	} else {
		/*
		 * For PCIe controllers, we can't easily remap the separate
		 * metadata buffer from the block layer and thus require a
		 * separate metadata buffer for block layer metadata/PI support.
		 * We allow extended LBAs for the passthrough interface, though.
		 */
		if (id->flbas & NVME_NS_FLBAS_META_EXT)
			ns->features |= NVME_NS_EXT_LBAS;
		else
			ns->features |= NVME_NS_METADATA_SUPPORTED;
	}

	return 0;
}

2078 2079 2080
static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
		struct request_queue *q)
{
2081
	bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT;
2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095

	if (ctrl->max_hw_sectors) {
		u32 max_segments =
			(ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;

		max_segments = min_not_zero(max_segments, ctrl->max_segments);
		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
	}
	blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
	blk_queue_dma_alignment(q, 7);
	blk_queue_write_cache(q, vwc, vwc);
}

2096 2097 2098
static void nvme_update_disk_info(struct gendisk *disk,
		struct nvme_ns *ns, struct nvme_id_ns *id)
{
2099
	sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
2100
	unsigned short bs = 1 << ns->lba_shift;
D
Damien Le Moal 已提交
2101
	u32 atomic_bs, phys_bs, io_opt = 0;
2102

2103 2104 2105 2106
	/*
	 * The block layer can't support LBA sizes larger than the page size
	 * yet, so catch this early and don't allow block I/O.
	 */
2107
	if (ns->lba_shift > PAGE_SHIFT) {
2108
		capacity = 0;
2109 2110
		bs = (1 << 9);
	}
2111

2112 2113
	blk_integrity_unregister(disk);

D
Damien Le Moal 已提交
2114
	atomic_bs = phys_bs = bs;
K
Keith Busch 已提交
2115
	nvme_setup_streams_ns(ns->ctrl, ns, &phys_bs, &io_opt);
2116 2117 2118 2119 2120 2121
	if (id->nabo == 0) {
		/*
		 * Bit 1 indicates whether NAWUPF is defined for this namespace
		 * and whether it should be used instead of AWUPF. If NAWUPF ==
		 * 0 then AWUPF must be used instead.
		 */
2122
		if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
2123 2124 2125 2126
			atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
		else
			atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
	}
K
Keith Busch 已提交
2127

2128
	if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
2129
		/* NPWG = Namespace Preferred Write Granularity */
K
Keith Busch 已提交
2130
		phys_bs = bs * (1 + le16_to_cpu(id->npwg));
2131
		/* NOWS = Namespace Optimal Write Size */
K
Keith Busch 已提交
2132
		io_opt = bs * (1 + le16_to_cpu(id->nows));
2133 2134
	}

2135
	blk_queue_logical_block_size(disk->queue, bs);
2136 2137 2138 2139 2140 2141 2142 2143
	/*
	 * Linux filesystems assume writing a single physical block is
	 * an atomic operation. Hence limit the physical block size to the
	 * value of the Atomic Write Unit Power Fail parameter.
	 */
	blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
	blk_queue_io_min(disk->queue, phys_bs);
	blk_queue_io_opt(disk->queue, io_opt);
2144

2145 2146 2147 2148 2149 2150 2151 2152 2153
	/*
	 * Register a metadata profile for PI, or the plain non-integrity NVMe
	 * metadata masquerading as Type 0 if supported, otherwise reject block
	 * I/O to namespaces with metadata except when the namespace supports
	 * PI, as it can strip/insert in that case.
	 */
	if (ns->ms) {
		if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
		    (ns->features & NVME_NS_METADATA_SUPPORTED))
2154 2155
			nvme_init_integrity(disk, ns->ms, ns->pi_type,
					    ns->ctrl->max_integrity_segments);
2156 2157 2158 2159
		else if (!nvme_ns_has_pi(ns))
			capacity = 0;
	}

2160
	set_capacity_and_notify(disk, capacity);
2161

2162
	nvme_config_discard(disk, ns);
2163
	nvme_config_write_zeroes(disk, ns);
2164

2165 2166
	set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) ||
		test_bit(NVME_NS_FORCE_RO, &ns->flags));
2167 2168
}

2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205
static inline bool nvme_first_scan(struct gendisk *disk)
{
	/* nvme_alloc_ns() scans the disk prior to adding it */
	return !(disk->flags & GENHD_FL_UP);
}

static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u32 iob;

	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
	    is_power_of_2(ctrl->max_hw_sectors))
		iob = ctrl->max_hw_sectors;
	else
		iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));

	if (!iob)
		return;

	if (!is_power_of_2(iob)) {
		if (nvme_first_scan(ns->disk))
			pr_warn("%s: ignoring unaligned IO boundary:%u\n",
				ns->disk->disk_name, iob);
		return;
	}

	if (blk_queue_is_zoned(ns->disk->queue)) {
		if (nvme_first_scan(ns->disk))
			pr_warn("%s: ignoring zoned namespace IO boundary\n",
				ns->disk->disk_name);
		return;
	}

	blk_queue_chunk_sectors(ns->queue, iob);
}

2206
static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
2207
{
K
Keith Busch 已提交
2208 2209
	unsigned lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	int ret;
2210

2211
	blk_mq_freeze_queue(ns->disk->queue);
K
Keith Busch 已提交
2212
	ns->lba_shift = id->lbaf[lbaf].ds;
2213
	nvme_set_queue_limits(ns->ctrl, ns->queue);
2214

2215 2216 2217 2218 2219 2220
	ret = nvme_configure_metadata(ns, id);
	if (ret)
		goto out_unfreeze;
	nvme_set_chunk_sectors(ns, id);
	nvme_update_disk_info(ns->disk, ns, id);

2221
	if (ns->head->ids.csi == NVME_CSI_ZNS) {
2222
		ret = nvme_update_zone_info(ns, lbaf);
2223
		if (ret)
2224
			goto out_unfreeze;
2225 2226
	}

2227
	blk_mq_unfreeze_queue(ns->disk->queue);
2228

2229 2230
	if (blk_queue_is_zoned(ns->queue)) {
		ret = nvme_revalidate_zones(ns);
2231
		if (ret && !nvme_first_scan(ns->disk))
2232
			return ret;
2233 2234
	}

2235
#ifdef CONFIG_NVME_MULTIPATH
2236
	if (ns->head->disk) {
2237
		blk_mq_freeze_queue(ns->head->disk->queue);
2238
		nvme_update_disk_info(ns->head->disk, ns, id);
2239 2240
		blk_stack_limits(&ns->head->disk->queue->limits,
				 &ns->queue->limits, 0);
2241
		blk_queue_update_readahead(ns->head->disk->queue);
2242
		blk_mq_unfreeze_queue(ns->head->disk->queue);
2243
	}
2244
#endif
2245
	return 0;
2246

2247 2248
out_unfreeze:
	blk_mq_unfreeze_queue(ns->disk->queue);
K
Keith Busch 已提交
2249 2250 2251
	return ret;
}

2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274
static char nvme_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 1;
	case PR_EXCLUSIVE_ACCESS:
		return 2;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 3;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 4;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 5;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 6;
	default:
		return 0;
	}
};

static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
2275 2276
	struct nvme_ns_head *head = NULL;
	struct nvme_ns *ns;
2277
	struct nvme_command c;
2278
	int srcu_idx, ret;
2279 2280
	u8 data[16] = { 0, };

2281 2282 2283 2284
	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
	if (unlikely(!ns))
		return -EWOULDBLOCK;

2285 2286 2287 2288 2289
	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	memset(&c, 0, sizeof(c));
	c.common.opcode = op;
2290
	c.common.nsid = cpu_to_le32(ns->head->ns_id);
2291
	c.common.cdw10 = cpu_to_le32(cdw10);
2292

2293
	ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
2294 2295
	nvme_put_ns_from_disk(head, srcu_idx);
	return ret;
2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327
}

static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}

static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}

static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
2328
	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
2329 2330 2331 2332 2333
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}

static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
2334
	u32 cdw10 = 1 | (key ? 1 << 3 : 0);
2335 2336 2337 2338 2339
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
}

static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
2340
	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

static const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};

2352
#ifdef CONFIG_BLK_SED_OPAL
2353 2354
int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send)
2355
{
2356
	struct nvme_ctrl *ctrl = data;
2357 2358 2359 2360 2361 2362 2363 2364
	struct nvme_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	if (send)
		cmd.common.opcode = nvme_admin_security_send;
	else
		cmd.common.opcode = nvme_admin_security_recv;
	cmd.common.nsid = 0;
2365 2366
	cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
	cmd.common.cdw11 = cpu_to_le32(len);
2367

2368 2369
	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 0,
			NVME_QID_ANY, 1, 0, false);
2370 2371 2372 2373
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */

J
Javier González 已提交
2374
static const struct block_device_operations nvme_bdev_ops = {
2375 2376
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
2377
	.compat_ioctl	= nvme_compat_ioctl,
2378 2379 2380
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
K
Keith Busch 已提交
2381
	.report_zones	= nvme_report_zones,
2382 2383 2384
	.pr_ops		= &nvme_pr_ops,
};

2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401
#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;

	if (!kref_get_unless_zero(&head->ref))
		return -ENXIO;
	return 0;
}

static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
{
	nvme_put_ns_head(disk->private_data);
}

const struct block_device_operations nvme_ns_head_ops = {
	.owner		= THIS_MODULE,
2402
	.submit_bio	= nvme_ns_head_submit_bio,
2403 2404 2405
	.open		= nvme_ns_head_open,
	.release	= nvme_ns_head_release,
	.ioctl		= nvme_ioctl,
2406
	.compat_ioctl	= nvme_compat_ioctl,
2407
	.getgeo		= nvme_getgeo,
K
Keith Busch 已提交
2408
	.report_zones	= nvme_report_zones,
2409 2410 2411 2412
	.pr_ops		= &nvme_pr_ops,
};
#endif /* CONFIG_NVME_MULTIPATH */

2413 2414 2415 2416 2417 2418 2419 2420
static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
	unsigned long timeout =
		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
	int ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
K
Keith Busch 已提交
2421 2422
		if (csts == ~0)
			return -ENODEV;
2423 2424 2425
		if ((csts & NVME_CSTS_RDY) == bit)
			break;

2426
		usleep_range(1000, 2000);
2427 2428 2429
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
2430
			dev_err(ctrl->device,
2431 2432
				"Device not ready; aborting %s, CSTS=0x%x\n",
				enabled ? "initialisation" : "reset", csts);
2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445
			return -ENODEV;
		}
	}

	return ret;
}

/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
2446
int nvme_disable_ctrl(struct nvme_ctrl *ctrl)
2447 2448 2449 2450 2451 2452 2453 2454 2455
{
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config &= ~NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
2456

2457
	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
2458 2459
		msleep(NVME_QUIRK_DELAY_AMOUNT);

2460
	return nvme_wait_ready(ctrl, ctrl->cap, false);
2461
}
2462
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
2463

2464
int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
2465
{
2466
	unsigned dev_page_min;
2467 2468
	int ret;

2469 2470 2471 2472 2473 2474 2475
	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
	if (ret) {
		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
		return ret;
	}
	dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;

2476
	if (NVME_CTRL_PAGE_SHIFT < dev_page_min) {
2477
		dev_err(ctrl->device,
2478
			"Minimum device page size %u too large for host (%u)\n",
2479
			1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT);
2480 2481 2482
		return -ENODEV;
	}

2483 2484 2485 2486
	if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI)
		ctrl->ctrl_config = NVME_CC_CSS_CSI;
	else
		ctrl->ctrl_config = NVME_CC_CSS_NVM;
2487
	ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
2488
	ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
2489 2490 2491 2492 2493 2494
	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
	ctrl->ctrl_config |= NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
2495
	return nvme_wait_ready(ctrl, ctrl->cap, true);
2496
}
2497
EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
2498 2499 2500

int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
{
2501
	unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ);
2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519
	u32 csts;
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
2520
			dev_err(ctrl->device,
2521 2522 2523 2524 2525 2526 2527
				"Device shutdown incomplete; abort shutdown\n");
			return -ENODEV;
		}
	}

	return ret;
}
2528
EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
2529

2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546
static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
{
	__le64 ts;
	int ret;

	if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
		return 0;

	ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
	ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
			NULL);
	if (ret)
		dev_warn_once(ctrl->device,
			"could not set timestamp (%d)\n", ret);
	return ret;
}

2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566
static int nvme_configure_acre(struct nvme_ctrl *ctrl)
{
	struct nvme_feat_host_behavior *host;
	int ret;

	/* Don't bother enabling the feature if retry delay is not reported */
	if (!ctrl->crdt[0])
		return 0;

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return 0;

	host->acre = NVME_ENABLE_ACRE;
	ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
				host, sizeof(*host), NULL);
	kfree(host);
	return ret;
}

2567
static int nvme_configure_apst(struct nvme_ctrl *ctrl)
2568 2569 2570 2571 2572 2573 2574 2575
{
	/*
	 * APST (Autonomous Power State Transition) lets us program a
	 * table of power state transitions that the controller will
	 * perform automatically.  We configure it with a simple
	 * heuristic: we are willing to spend at most 2% of the time
	 * transitioning between power states.  Therefore, when running
	 * in any given state, we will enter the next lower-power
A
Andy Lutomirski 已提交
2576
	 * non-operational state after waiting 50 * (enlat + exlat)
2577
	 * microseconds, as long as that state's exit latency is under
2578 2579 2580 2581 2582 2583 2584 2585 2586
	 * the requested maximum latency.
	 *
	 * We will not autonomously enter any non-operational state for
	 * which the total latency exceeds ps_max_latency_us.  Users
	 * can set ps_max_latency_us to zero to turn off APST.
	 */

	unsigned apste;
	struct nvme_feat_auto_pst *table;
2587 2588
	u64 max_lat_us = 0;
	int max_ps = -1;
2589 2590 2591 2592 2593 2594 2595
	int ret;

	/*
	 * If APST isn't supported or if we haven't been initialized yet,
	 * then don't do anything.
	 */
	if (!ctrl->apsta)
2596
		return 0;
2597 2598 2599

	if (ctrl->npss > 31) {
		dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
2600
		return 0;
2601 2602 2603 2604
	}

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
2605
		return 0;
2606

2607
	if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
2608 2609
		/* Turn off APST. */
		apste = 0;
2610
		dev_dbg(ctrl->device, "APST disabled\n");
2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621
	} else {
		__le64 target = cpu_to_le64(0);
		int state;

		/*
		 * Walk through all states from lowest- to highest-power.
		 * According to the spec, lower-numbered states use more
		 * power.  NPSS, despite the name, is the index of the
		 * lowest-power state, not the number of states.
		 */
		for (state = (int)ctrl->npss; state >= 0; state--) {
2622
			u64 total_latency_us, exit_latency_us, transition_ms;
2623 2624 2625 2626

			if (target)
				table->entries[state] = target;

2627 2628 2629 2630 2631 2632 2633 2634
			/*
			 * Don't allow transitions to the deepest state
			 * if it's quirked off.
			 */
			if (state == ctrl->npss &&
			    (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
				continue;

2635 2636 2637 2638 2639 2640 2641 2642
			/*
			 * Is this state a useful non-operational state for
			 * higher-power states to autonomously transition to?
			 */
			if (!(ctrl->psd[state].flags &
			      NVME_PS_FLAGS_NON_OP_STATE))
				continue;

2643 2644 2645
			exit_latency_us =
				(u64)le32_to_cpu(ctrl->psd[state].exit_lat);
			if (exit_latency_us > ctrl->ps_max_latency_us)
2646 2647
				continue;

2648 2649 2650 2651
			total_latency_us =
				exit_latency_us +
				le32_to_cpu(ctrl->psd[state].entry_lat);

2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662
			/*
			 * This state is good.  Use it as the APST idle
			 * target for higher power states.
			 */
			transition_ms = total_latency_us + 19;
			do_div(transition_ms, 20);
			if (transition_ms > (1 << 24) - 1)
				transition_ms = (1 << 24) - 1;

			target = cpu_to_le64((state << 3) |
					     (transition_ms << 8));
2663 2664 2665 2666 2667 2668

			if (max_ps == -1)
				max_ps = state;

			if (total_latency_us > max_lat_us)
				max_lat_us = total_latency_us;
2669 2670 2671
		}

		apste = 1;
2672 2673 2674 2675 2676 2677 2678

		if (max_ps == -1) {
			dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
		} else {
			dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
				max_ps, max_lat_us, (int)sizeof(*table), table);
		}
2679 2680 2681 2682 2683 2684 2685 2686
	}

	ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
				table, sizeof(*table), NULL);
	if (ret)
		dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);

	kfree(table);
2687
	return ret;
2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710
}

static void nvme_set_latency_tolerance(struct device *dev, s32 val)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	u64 latency;

	switch (val) {
	case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
	case PM_QOS_LATENCY_ANY:
		latency = U64_MAX;
		break;

	default:
		latency = val;
	}

	if (ctrl->ps_max_latency_us != latency) {
		ctrl->ps_max_latency_us = latency;
		nvme_configure_apst(ctrl);
	}
}

2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723
struct nvme_core_quirk_entry {
	/*
	 * NVMe model and firmware strings are padded with spaces.  For
	 * simplicity, strings in the quirk table are padded with NULLs
	 * instead.
	 */
	u16 vid;
	const char *mn;
	const char *fr;
	unsigned long quirks;
};

static const struct nvme_core_quirk_entry core_quirks[] = {
2724
	{
2725 2726 2727 2728 2729 2730
		/*
		 * This Toshiba device seems to die using any APST states.  See:
		 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
		 */
		.vid = 0x1179,
		.mn = "THNSF5256GPUK TOSHIBA",
2731
		.quirks = NVME_QUIRK_NO_APST,
2732 2733 2734 2735 2736 2737 2738 2739 2740 2741
	},
	{
		/*
		 * This LiteON CL1-3D*-Q11 firmware version has a race
		 * condition associated with actions related to suspend to idle
		 * LiteON has resolved the problem in future firmware
		 */
		.vid = 0x14a4,
		.fr = "22301111",
		.quirks = NVME_QUIRK_SIMPLE_SUSPEND,
2742
	}
2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773
};

/* match is null-terminated but idstr is space-padded. */
static bool string_matches(const char *idstr, const char *match, size_t len)
{
	size_t matchlen;

	if (!match)
		return true;

	matchlen = strlen(match);
	WARN_ON_ONCE(matchlen > len);

	if (memcmp(idstr, match, matchlen))
		return false;

	for (; matchlen < len; matchlen++)
		if (idstr[matchlen] != ' ')
			return false;

	return true;
}

static bool quirk_matches(const struct nvme_id_ctrl *id,
			  const struct nvme_core_quirk_entry *q)
{
	return q->vid == le16_to_cpu(id->vid) &&
		string_matches(id->mn, q->mn, sizeof(id->mn)) &&
		string_matches(id->fr, q->fr, sizeof(id->fr));
}

C
Christoph Hellwig 已提交
2774 2775
static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
2776 2777 2778 2779
{
	size_t nqnlen;
	int off;

2780 2781 2782 2783 2784 2785
	if(!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
		nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
		if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
			strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
			return;
		}
2786

2787 2788 2789
		if (ctrl->vs >= NVME_VS(1, 2, 1))
			dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
	}
2790 2791

	/* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
C
Christoph Hellwig 已提交
2792
	off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
2793
			"nqn.2014.08.org.nvmexpress:%04x%04x",
2794
			le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
C
Christoph Hellwig 已提交
2795
	memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
2796
	off += sizeof(id->sn);
C
Christoph Hellwig 已提交
2797
	memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
2798
	off += sizeof(id->mn);
C
Christoph Hellwig 已提交
2799 2800 2801
	memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
}

2802
static void nvme_release_subsystem(struct device *dev)
C
Christoph Hellwig 已提交
2803
{
2804 2805 2806
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

2807 2808
	if (subsys->instance >= 0)
		ida_simple_remove(&nvme_instance_ida, subsys->instance);
C
Christoph Hellwig 已提交
2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820
	kfree(subsys);
}

static void nvme_destroy_subsystem(struct kref *ref)
{
	struct nvme_subsystem *subsys =
			container_of(ref, struct nvme_subsystem, ref);

	mutex_lock(&nvme_subsystems_lock);
	list_del(&subsys->entry);
	mutex_unlock(&nvme_subsystems_lock);

C
Christoph Hellwig 已提交
2821
	ida_destroy(&subsys->ns_ida);
C
Christoph Hellwig 已提交
2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836
	device_del(&subsys->dev);
	put_device(&subsys->dev);
}

static void nvme_put_subsystem(struct nvme_subsystem *subsys)
{
	kref_put(&subsys->ref, nvme_destroy_subsystem);
}

static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
{
	struct nvme_subsystem *subsys;

	lockdep_assert_held(&nvme_subsystems_lock);

2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847
	/*
	 * Fail matches for discovery subsystems. This results
	 * in each discovery controller bound to a unique subsystem.
	 * This avoids issues with validating controller values
	 * that can only be true when there is a single unique subsystem.
	 * There may be multiple and completely independent entities
	 * that provide discovery controllers.
	 */
	if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME))
		return NULL;

C
Christoph Hellwig 已提交
2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858
	list_for_each_entry(subsys, &nvme_subsystems, entry) {
		if (strcmp(subsys->subnqn, subsysnqn))
			continue;
		if (!kref_get_unless_zero(&subsys->ref))
			continue;
		return subsys;
	}

	return NULL;
}

2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869
#define SUBSYS_ATTR_RO(_name, _mode, _show)			\
	struct device_attribute subsys_attr_##_name = \
		__ATTR(_name, _mode, _show, NULL)

static ssize_t nvme_subsys_show_nqn(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

2870
	return sysfs_emit(buf, "%s\n", subsys->subnqn);
2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893
}
static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);

#define nvme_subsys_show_str_function(field)				\
static ssize_t subsys_##field##_show(struct device *dev,		\
			    struct device_attribute *attr, char *buf)	\
{									\
	struct nvme_subsystem *subsys =					\
		container_of(dev, struct nvme_subsystem, dev);		\
	return sprintf(buf, "%.*s\n",					\
		       (int)sizeof(subsys->field), subsys->field);	\
}									\
static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);

nvme_subsys_show_str_function(model);
nvme_subsys_show_str_function(serial);
nvme_subsys_show_str_function(firmware_rev);

static struct attribute *nvme_subsys_attrs[] = {
	&subsys_attr_model.attr,
	&subsys_attr_serial.attr,
	&subsys_attr_firmware_rev.attr,
	&subsys_attr_subsysnqn.attr,
2894 2895 2896
#ifdef CONFIG_NVME_MULTIPATH
	&subsys_attr_iopolicy.attr,
#endif
2897 2898 2899
	NULL,
};

2900
static const struct attribute_group nvme_subsys_attrs_group = {
2901 2902 2903 2904 2905 2906 2907 2908
	.attrs = nvme_subsys_attrs,
};

static const struct attribute_group *nvme_subsys_attrs_groups[] = {
	&nvme_subsys_attrs_group,
	NULL,
};

2909 2910 2911 2912 2913
static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
{
	return ctrl->opts && ctrl->opts->discovery_nqn;
}

2914 2915
static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
		struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2916
{
2917
	struct nvme_ctrl *tmp;
2918

2919 2920
	lockdep_assert_held(&nvme_subsystems_lock);

2921
	list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
2922
		if (nvme_state_terminal(tmp))
2923 2924 2925 2926 2927 2928 2929 2930
			continue;

		if (tmp->cntlid == ctrl->cntlid) {
			dev_err(ctrl->device,
				"Duplicate cntlid %u with %s, rejecting\n",
				ctrl->cntlid, dev_name(tmp->device));
			return false;
		}
2931

2932
		if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
2933
		    nvme_discovery_ctrl(ctrl))
2934 2935 2936 2937 2938
			continue;

		dev_err(ctrl->device,
			"Subsystem does not support multiple controllers\n");
		return false;
2939 2940
	}

2941
	return true;
2942 2943
}

C
Christoph Hellwig 已提交
2944 2945 2946 2947 2948 2949 2950 2951
static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	struct nvme_subsystem *subsys, *found;
	int ret;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return -ENOMEM;
2952 2953

	subsys->instance = -1;
C
Christoph Hellwig 已提交
2954 2955 2956
	mutex_init(&subsys->lock);
	kref_init(&subsys->ref);
	INIT_LIST_HEAD(&subsys->ctrls);
C
Christoph Hellwig 已提交
2957
	INIT_LIST_HEAD(&subsys->nsheads);
C
Christoph Hellwig 已提交
2958 2959 2960 2961 2962 2963
	nvme_init_subnqn(subsys, ctrl, id);
	memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
	memcpy(subsys->model, id->mn, sizeof(subsys->model));
	memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
	subsys->vendor_id = le16_to_cpu(id->vid);
	subsys->cmic = id->cmic;
2964
	subsys->awupf = le16_to_cpu(id->awupf);
2965 2966 2967
#ifdef CONFIG_NVME_MULTIPATH
	subsys->iopolicy = NVME_IOPOLICY_NUMA;
#endif
C
Christoph Hellwig 已提交
2968 2969 2970

	subsys->dev.class = nvme_subsys_class;
	subsys->dev.release = nvme_release_subsystem;
2971
	subsys->dev.groups = nvme_subsys_attrs_groups;
2972
	dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
C
Christoph Hellwig 已提交
2973 2974 2975 2976 2977
	device_initialize(&subsys->dev);

	mutex_lock(&nvme_subsystems_lock);
	found = __nvme_find_get_subsystem(subsys->subnqn);
	if (found) {
2978
		put_device(&subsys->dev);
C
Christoph Hellwig 已提交
2979
		subsys = found;
2980

2981
		if (!nvme_validate_cntlid(subsys, ctrl, id)) {
C
Christoph Hellwig 已提交
2982
			ret = -EINVAL;
2983
			goto out_put_subsystem;
C
Christoph Hellwig 已提交
2984 2985 2986 2987 2988 2989
		}
	} else {
		ret = device_add(&subsys->dev);
		if (ret) {
			dev_err(ctrl->device,
				"failed to register subsystem device.\n");
2990
			put_device(&subsys->dev);
C
Christoph Hellwig 已提交
2991 2992
			goto out_unlock;
		}
C
Christoph Hellwig 已提交
2993
		ida_init(&subsys->ns_ida);
C
Christoph Hellwig 已提交
2994 2995 2996
		list_add_tail(&subsys->entry, &nvme_subsystems);
	}

2997 2998 2999
	ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
				dev_name(ctrl->device));
	if (ret) {
C
Christoph Hellwig 已提交
3000 3001
		dev_err(ctrl->device,
			"failed to create sysfs link from subsystem.\n");
3002
		goto out_put_subsystem;
C
Christoph Hellwig 已提交
3003 3004
	}

3005 3006
	if (!found)
		subsys->instance = ctrl->instance;
3007
	ctrl->subsys = subsys;
C
Christoph Hellwig 已提交
3008
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
3009
	mutex_unlock(&nvme_subsystems_lock);
C
Christoph Hellwig 已提交
3010 3011
	return 0;

3012 3013
out_put_subsystem:
	nvme_put_subsystem(subsys);
C
Christoph Hellwig 已提交
3014 3015 3016
out_unlock:
	mutex_unlock(&nvme_subsystems_lock);
	return ret;
3017 3018
}

3019
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
3020
		void *log, size_t size, u64 offset)
K
Keith Busch 已提交
3021 3022
{
	struct nvme_command c = { };
K
Keith Busch 已提交
3023
	u32 dwlen = nvme_bytes_to_numd(size);
3024 3025

	c.get_log_page.opcode = nvme_admin_get_log_page;
3026
	c.get_log_page.nsid = cpu_to_le32(nsid);
3027
	c.get_log_page.lid = log_page;
3028
	c.get_log_page.lsp = lsp;
3029 3030
	c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
	c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
3031 3032
	c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
	c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
3033
	c.get_log_page.csi = csi;
K
Keith Busch 已提交
3034 3035 3036 3037

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
}

3038 3039
static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
				struct nvme_effects_log **log)
3040
{
3041
	struct nvme_effects_log	*cel = xa_load(&ctrl->cels, csi);
3042 3043
	int ret;

3044 3045
	if (cel)
		goto out;
3046

3047 3048 3049
	cel = kzalloc(sizeof(*cel), GFP_KERNEL);
	if (!cel)
		return -ENOMEM;
3050

3051
	ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi,
3052
			cel, sizeof(*cel), 0);
3053
	if (ret) {
3054 3055
		kfree(cel);
		return ret;
3056
	}
3057

3058
	xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
3059
out:
3060
	*log = cel;
3061
	return 0;
3062 3063
}

3064 3065 3066 3067 3068 3069 3070 3071 3072
/*
 * Initialize the cached copies of the Identify data and various controller
 * register in our nvme_ctrl structure.  This should be called as soon as
 * the admin queue is fully up and running.
 */
int nvme_init_identify(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;
	int ret, page_shift;
3073
	u32 max_hw_sectors;
3074
	bool prev_apst_enabled;
3075

3076 3077
	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
	if (ret) {
3078
		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
3079 3080
		return ret;
	}
3081
	page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
3082
	ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
3083

3084
	if (ctrl->vs >= NVME_VS(1, 1, 0))
3085
		ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);
3086

3087 3088
	ret = nvme_identify_ctrl(ctrl, &id);
	if (ret) {
3089
		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
3090 3091 3092
		return -EIO;
	}

3093
	if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
3094
		ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
3095
		if (ret < 0)
3096
			goto out_free;
3097
	}
3098

3099 3100 3101
	if (!(ctrl->ops->flags & NVME_F_FABRICS))
		ctrl->cntlid = le16_to_cpu(id->cntlid);

3102
	if (!ctrl->identified) {
C
Christoph Hellwig 已提交
3103 3104 3105 3106 3107 3108
		int i;

		ret = nvme_init_subsystem(ctrl, id);
		if (ret)
			goto out_free;

3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122
		/*
		 * Check for quirks.  Quirk can depend on firmware version,
		 * so, in principle, the set of quirks present can change
		 * across a reset.  As a possible future enhancement, we
		 * could re-scan for quirks every time we reinitialize
		 * the device, but we'd have to make sure that the driver
		 * behaves intelligently if the quirks change.
		 */
		for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
			if (quirk_matches(id, &core_quirks[i]))
				ctrl->quirks |= core_quirks[i].quirks;
		}
	}

3123
	if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
3124
		dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
3125 3126 3127
		ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
	}

3128 3129 3130 3131
	ctrl->crdt[0] = le16_to_cpu(id->crdt1);
	ctrl->crdt[1] = le16_to_cpu(id->crdt2);
	ctrl->crdt[2] = le16_to_cpu(id->crdt3);

3132
	ctrl->oacs = le16_to_cpu(id->oacs);
3133
	ctrl->oncs = le16_to_cpu(id->oncs);
3134
	ctrl->mtfa = le16_to_cpu(id->mtfa);
3135
	ctrl->oaes = le32_to_cpu(id->oaes);
3136 3137 3138
	ctrl->wctemp = le16_to_cpu(id->wctemp);
	ctrl->cctemp = le16_to_cpu(id->cctemp);

3139
	atomic_set(&ctrl->abort_limit, id->acl + 1);
3140 3141
	ctrl->vwc = id->vwc;
	if (id->mdts)
3142
		max_hw_sectors = 1 << (id->mdts + page_shift - 9);
3143
	else
3144 3145 3146
		max_hw_sectors = UINT_MAX;
	ctrl->max_hw_sectors =
		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
3147

3148
	nvme_set_queue_limits(ctrl, ctrl->admin_q);
3149
	ctrl->sgls = le32_to_cpu(id->sgls);
S
Sagi Grimberg 已提交
3150
	ctrl->kas = le16_to_cpu(id->kas);
C
Christoph Hellwig 已提交
3151
	ctrl->max_namespaces = le32_to_cpu(id->mnan);
S
Sagi Grimberg 已提交
3152
	ctrl->ctratt = le32_to_cpu(id->ctratt);
3153

3154 3155
	if (id->rtd3e) {
		/* us -> s */
3156
		u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC;
3157 3158 3159 3160 3161

		ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
						 shutdown_timeout, 60);

		if (ctrl->shutdown_timeout != shutdown_timeout)
3162
			dev_info(ctrl->device,
3163 3164 3165 3166 3167
				 "Shutdown timeout set to %u seconds\n",
				 ctrl->shutdown_timeout);
	} else
		ctrl->shutdown_timeout = shutdown_timeout;

3168
	ctrl->npss = id->npss;
3169 3170
	ctrl->apsta = id->apsta;
	prev_apst_enabled = ctrl->apst_enabled;
3171 3172
	if (ctrl->quirks & NVME_QUIRK_NO_APST) {
		if (force_apst && id->apsta) {
3173
			dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
3174
			ctrl->apst_enabled = true;
3175
		} else {
3176
			ctrl->apst_enabled = false;
3177 3178
		}
	} else {
3179
		ctrl->apst_enabled = id->apsta;
3180
	}
3181 3182
	memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));

3183
	if (ctrl->ops->flags & NVME_F_FABRICS) {
3184 3185 3186 3187 3188 3189 3190 3191 3192
		ctrl->icdoff = le16_to_cpu(id->icdoff);
		ctrl->ioccsz = le32_to_cpu(id->ioccsz);
		ctrl->iorcsz = le32_to_cpu(id->iorcsz);
		ctrl->maxcmd = le16_to_cpu(id->maxcmd);

		/*
		 * In fabrics we need to verify the cntlid matches the
		 * admin connect
		 */
3193
		if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
3194 3195 3196 3197
			dev_err(ctrl->device,
				"Mismatching cntlid: Connect %u vs Identify "
				"%u, rejecting\n",
				ctrl->cntlid, le16_to_cpu(id->cntlid));
3198
			ret = -EINVAL;
3199 3200
			goto out_free;
		}
S
Sagi Grimberg 已提交
3201

3202
		if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
3203
			dev_err(ctrl->device,
S
Sagi Grimberg 已提交
3204 3205
				"keep-alive support is mandatory for fabrics\n");
			ret = -EINVAL;
3206
			goto out_free;
S
Sagi Grimberg 已提交
3207
		}
3208
	} else {
3209 3210
		ctrl->hmpre = le32_to_cpu(id->hmpre);
		ctrl->hmmin = le32_to_cpu(id->hmmin);
3211 3212
		ctrl->hmminds = le32_to_cpu(id->hmminds);
		ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
3213
	}
3214

C
Christoph Hellwig 已提交
3215
	ret = nvme_mpath_init(ctrl, id);
3216
	kfree(id);
3217

C
Christoph Hellwig 已提交
3218 3219 3220
	if (ret < 0)
		return ret;

3221
	if (ctrl->apst_enabled && !prev_apst_enabled)
3222
		dev_pm_qos_expose_latency_tolerance(ctrl->device);
3223
	else if (!ctrl->apst_enabled && prev_apst_enabled)
3224 3225
		dev_pm_qos_hide_latency_tolerance(ctrl->device);

3226 3227 3228
	ret = nvme_configure_apst(ctrl);
	if (ret < 0)
		return ret;
3229 3230 3231 3232
	
	ret = nvme_configure_timestamp(ctrl);
	if (ret < 0)
		return ret;
3233 3234 3235 3236

	ret = nvme_configure_directives(ctrl);
	if (ret < 0)
		return ret;
3237

3238 3239 3240 3241
	ret = nvme_configure_acre(ctrl);
	if (ret < 0)
		return ret;

3242
	if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
K
Keith Busch 已提交
3243 3244 3245 3246
		ret = nvme_hwmon_init(ctrl);
		if (ret < 0)
			return ret;
	}
3247

3248
	ctrl->identified = true;
3249

3250 3251 3252 3253
	return 0;

out_free:
	kfree(id);
3254
	return ret;
3255
}
3256
EXPORT_SYMBOL_GPL(nvme_init_identify);
3257

3258
static int nvme_dev_open(struct inode *inode, struct file *file)
3259
{
3260 3261
	struct nvme_ctrl *ctrl =
		container_of(inode->i_cdev, struct nvme_ctrl, cdev);
3262

3263 3264 3265 3266
	switch (ctrl->state) {
	case NVME_CTRL_LIVE:
		break;
	default:
3267
		return -EWOULDBLOCK;
3268 3269
	}

3270
	nvme_get_ctrl(ctrl);
3271 3272
	if (!try_module_get(ctrl->ops->module)) {
		nvme_put_ctrl(ctrl);
3273
		return -EINVAL;
3274
	}
3275

3276
	file->private_data = ctrl;
3277 3278 3279
	return 0;
}

3280 3281 3282 3283 3284 3285 3286 3287 3288 3289
static int nvme_dev_release(struct inode *inode, struct file *file)
{
	struct nvme_ctrl *ctrl =
		container_of(inode->i_cdev, struct nvme_ctrl, cdev);

	module_put(ctrl->ops->module);
	nvme_put_ctrl(ctrl);
	return 0;
}

3290 3291 3292 3293 3294
static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
	struct nvme_ns *ns;
	int ret;

3295
	down_read(&ctrl->namespaces_rwsem);
3296 3297 3298 3299 3300 3301 3302
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
3303
		dev_warn(ctrl->device,
3304 3305 3306 3307 3308
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

3309
	dev_warn(ctrl->device,
3310 3311
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	kref_get(&ns->kref);
3312
	up_read(&ctrl->namespaces_rwsem);
3313 3314 3315 3316 3317 3318

	ret = nvme_user_cmd(ctrl, ns, argp);
	nvme_put_ns(ns);
	return ret;

out_unlock:
3319
	up_read(&ctrl->namespaces_rwsem);
3320 3321 3322
	return ret;
}

3323 3324 3325 3326 3327 3328 3329 3330 3331
static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct nvme_ctrl *ctrl = file->private_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp);
3332 3333
	case NVME_IOCTL_ADMIN64_CMD:
		return nvme_user_cmd64(ctrl, NULL, argp);
3334
	case NVME_IOCTL_IO_CMD:
3335
		return nvme_dev_user_cmd(ctrl, argp);
3336
	case NVME_IOCTL_RESET:
3337
		dev_warn(ctrl->device, "resetting controller\n");
3338
		return nvme_reset_ctrl_sync(ctrl);
3339 3340
	case NVME_IOCTL_SUBSYS_RESET:
		return nvme_reset_subsystem(ctrl);
K
Keith Busch 已提交
3341 3342 3343
	case NVME_IOCTL_RESCAN:
		nvme_queue_scan(ctrl);
		return 0;
3344 3345 3346 3347 3348 3349 3350 3351
	default:
		return -ENOTTY;
	}
}

static const struct file_operations nvme_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_dev_open,
3352
	.release	= nvme_dev_release,
3353
	.unlocked_ioctl	= nvme_dev_ioctl,
3354
	.compat_ioctl	= compat_ptr_ioctl,
3355 3356 3357 3358 3359 3360 3361 3362 3363
};

static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

3364
	ret = nvme_reset_ctrl_sync(ctrl);
3365 3366 3367
	if (ret < 0)
		return ret;
	return count;
3368
}
3369
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
3370

K
Keith Busch 已提交
3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381
static ssize_t nvme_sysfs_rescan(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	nvme_queue_scan(ctrl);
	return count;
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);

3382 3383 3384 3385
static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
{
	struct gendisk *disk = dev_to_disk(dev);

J
Javier González 已提交
3386
	if (disk->fops == &nvme_bdev_ops)
3387 3388 3389 3390 3391
		return nvme_get_ns_from_dev(dev)->head;
	else
		return disk->private_data;
}

3392
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
3393
		char *buf)
3394
{
3395 3396 3397
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	struct nvme_ns_ids *ids = &head->ids;
	struct nvme_subsystem *subsys = head->subsys;
C
Christoph Hellwig 已提交
3398 3399
	int serial_len = sizeof(subsys->serial);
	int model_len = sizeof(subsys->model);
3400

3401 3402
	if (!uuid_is_null(&ids->uuid))
		return sprintf(buf, "uuid.%pU\n", &ids->uuid);
3403

3404 3405
	if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
		return sprintf(buf, "eui.%16phN\n", ids->nguid);
3406

3407 3408
	if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
		return sprintf(buf, "eui.%8phN\n", ids->eui64);
3409

C
Christoph Hellwig 已提交
3410 3411
	while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
				  subsys->serial[serial_len - 1] == '\0'))
3412
		serial_len--;
C
Christoph Hellwig 已提交
3413 3414
	while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
				 subsys->model[model_len - 1] == '\0'))
3415 3416
		model_len--;

C
Christoph Hellwig 已提交
3417 3418
	return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
		serial_len, subsys->serial, model_len, subsys->model,
3419
		head->ns_id);
3420
}
J
Joe Perches 已提交
3421
static DEVICE_ATTR_RO(wwid);
3422

3423
static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
3424
		char *buf)
3425
{
3426
	return sprintf(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
3427
}
J
Joe Perches 已提交
3428
static DEVICE_ATTR_RO(nguid);
3429

3430
static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
3431
		char *buf)
3432
{
3433
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
3434 3435 3436 3437

	/* For backward compatibility expose the NGUID to userspace if
	 * we have no UUID set
	 */
3438
	if (uuid_is_null(&ids->uuid)) {
3439 3440
		printk_ratelimited(KERN_WARNING
				   "No UUID available providing old NGUID\n");
3441
		return sprintf(buf, "%pU\n", ids->nguid);
3442
	}
3443
	return sprintf(buf, "%pU\n", &ids->uuid);
3444
}
J
Joe Perches 已提交
3445
static DEVICE_ATTR_RO(uuid);
3446 3447

static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
3448
		char *buf)
3449
{
3450
	return sprintf(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
3451
}
J
Joe Perches 已提交
3452
static DEVICE_ATTR_RO(eui);
3453 3454

static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
3455
		char *buf)
3456
{
3457
	return sprintf(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
3458
}
J
Joe Perches 已提交
3459
static DEVICE_ATTR_RO(nsid);
3460

3461
static struct attribute *nvme_ns_id_attrs[] = {
3462
	&dev_attr_wwid.attr,
3463
	&dev_attr_uuid.attr,
3464
	&dev_attr_nguid.attr,
3465 3466
	&dev_attr_eui.attr,
	&dev_attr_nsid.attr,
C
Christoph Hellwig 已提交
3467 3468 3469 3470
#ifdef CONFIG_NVME_MULTIPATH
	&dev_attr_ana_grpid.attr,
	&dev_attr_ana_state.attr,
#endif
3471 3472 3473
	NULL,
};

3474
static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
3475 3476 3477
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
3478
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
3479 3480

	if (a == &dev_attr_uuid.attr) {
3481
		if (uuid_is_null(&ids->uuid) &&
3482
		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3483 3484 3485
			return 0;
	}
	if (a == &dev_attr_nguid.attr) {
3486
		if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3487 3488 3489
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
3490
		if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
3491 3492
			return 0;
	}
C
Christoph Hellwig 已提交
3493 3494
#ifdef CONFIG_NVME_MULTIPATH
	if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
J
Javier González 已提交
3495
		if (dev_to_disk(dev)->fops != &nvme_bdev_ops) /* per-path attr */
C
Christoph Hellwig 已提交
3496 3497 3498 3499 3500
			return 0;
		if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
			return 0;
	}
#endif
3501 3502 3503
	return a->mode;
}

3504
static const struct attribute_group nvme_ns_id_attr_group = {
3505 3506
	.attrs		= nvme_ns_id_attrs,
	.is_visible	= nvme_ns_id_attrs_are_visible,
3507 3508
};

3509 3510 3511 3512 3513 3514 3515 3516
const struct attribute_group *nvme_ns_id_attr_groups[] = {
	&nvme_ns_id_attr_group,
#ifdef CONFIG_NVM
	&nvme_nvm_attr_group,
#endif
	NULL,
};

M
Ming Lin 已提交
3517
#define nvme_show_str_function(field)						\
3518 3519 3520 3521
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
        struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
C
Christoph Hellwig 已提交
3522 3523
        return sprintf(buf, "%.*s\n",						\
		(int)sizeof(ctrl->subsys->field), ctrl->subsys->field);		\
3524 3525 3526
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

C
Christoph Hellwig 已提交
3527 3528 3529 3530
nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);

M
Ming Lin 已提交
3531 3532 3533 3534 3535 3536 3537 3538 3539 3540
#define nvme_show_int_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
        struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
        return sprintf(buf, "%d\n", ctrl->field);	\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_int_function(cntlid);
3541
nvme_show_int_function(numa_node);
3542 3543
nvme_show_int_function(queue_count);
nvme_show_int_function(sqsize);
3544

M
Ming Lin 已提交
3545 3546 3547 3548 3549 3550 3551
static ssize_t nvme_sysfs_delete(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (device_remove_file_self(dev, attr))
3552
		nvme_delete_ctrl_sync(ctrl);
M
Ming Lin 已提交
3553 3554 3555 3556 3557 3558 3559 3560 3561 3562
	return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);

static ssize_t nvme_sysfs_show_transport(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

3563
	return sysfs_emit(buf, "%s\n", ctrl->ops->name);
M
Ming Lin 已提交
3564 3565 3566
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);

3567 3568 3569 3570 3571 3572 3573 3574 3575
static ssize_t nvme_sysfs_show_state(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	static const char *const state_name[] = {
		[NVME_CTRL_NEW]		= "new",
		[NVME_CTRL_LIVE]	= "live",
		[NVME_CTRL_RESETTING]	= "resetting",
3576
		[NVME_CTRL_CONNECTING]	= "connecting",
3577
		[NVME_CTRL_DELETING]	= "deleting",
3578
		[NVME_CTRL_DELETING_NOIO]= "deleting (no IO)",
3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590
		[NVME_CTRL_DEAD]	= "dead",
	};

	if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
	    state_name[ctrl->state])
		return sprintf(buf, "%s\n", state_name[ctrl->state]);

	return sprintf(buf, "unknown state\n");
}

static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);

M
Ming Lin 已提交
3591 3592 3593 3594 3595 3596
static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

3597
	return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
M
Ming Lin 已提交
3598 3599 3600
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

3601 3602 3603 3604 3605 3606
static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

3607
	return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
3608 3609 3610
}
static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);

3611 3612 3613 3614 3615 3616
static ssize_t nvme_sysfs_show_hostid(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

3617
	return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
3618 3619 3620
}
static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);

M
Ming Lin 已提交
3621 3622 3623 3624 3625 3626 3627 3628 3629 3630
static ssize_t nvme_sysfs_show_address(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);

3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681
static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (ctrl->opts->max_reconnects == -1)
		return sprintf(buf, "off\n");
	return sprintf(buf, "%d\n",
			opts->max_reconnects * opts->reconnect_delay);
}

static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ctrl_loss_tmo, err;

	err = kstrtoint(buf, 10, &ctrl_loss_tmo);
	if (err)
		return -EINVAL;

	else if (ctrl_loss_tmo < 0)
		opts->max_reconnects = -1;
	else
		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
						opts->reconnect_delay);
	return count;
}
static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR,
	nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store);

static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->opts->reconnect_delay == -1)
		return sprintf(buf, "off\n");
	return sprintf(buf, "%d\n", ctrl->opts->reconnect_delay);
}

static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	unsigned int v;
	int err;

	err = kstrtou32(buf, 10, &v);
3682 3683
	if (err)
		return err;
3684 3685 3686 3687 3688 3689 3690

	ctrl->opts->reconnect_delay = v;
	return count;
}
static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR,
	nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store);

3691 3692
static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
K
Keith Busch 已提交
3693
	&dev_attr_rescan_controller.attr,
3694 3695 3696
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
M
Ming Lin 已提交
3697
	&dev_attr_cntlid.attr,
M
Ming Lin 已提交
3698 3699 3700 3701
	&dev_attr_delete_controller.attr,
	&dev_attr_transport.attr,
	&dev_attr_subsysnqn.attr,
	&dev_attr_address.attr,
3702
	&dev_attr_state.attr,
3703
	&dev_attr_numa_node.attr,
3704 3705
	&dev_attr_queue_count.attr,
	&dev_attr_sqsize.attr,
3706
	&dev_attr_hostnqn.attr,
3707
	&dev_attr_hostid.attr,
3708 3709
	&dev_attr_ctrl_loss_tmo.attr,
	&dev_attr_reconnect_delay.attr,
3710 3711 3712
	NULL
};

M
Ming Lin 已提交
3713 3714 3715 3716 3717 3718
static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

3719 3720 3721 3722
	if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
		return 0;
	if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
		return 0;
3723 3724
	if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
		return 0;
3725 3726
	if (a == &dev_attr_hostid.attr && !ctrl->opts)
		return 0;
3727 3728 3729 3730
	if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
		return 0;
M
Ming Lin 已提交
3731 3732 3733 3734

	return a->mode;
}

3735
static const struct attribute_group nvme_dev_attrs_group = {
M
Ming Lin 已提交
3736 3737
	.attrs		= nvme_dev_attrs,
	.is_visible	= nvme_dev_attrs_are_visible,
3738 3739 3740 3741 3742 3743 3744
};

static const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
	NULL,
};

3745
static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
C
Christoph Hellwig 已提交
3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 3772 3773 3774 3775 3776
		unsigned nsid)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);

	list_for_each_entry(h, &subsys->nsheads, entry) {
		if (h->ns_id == nsid && kref_get_unless_zero(&h->ref))
			return h;
	}

	return NULL;
}

static int __nvme_check_ids(struct nvme_subsystem *subsys,
		struct nvme_ns_head *new)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);

	list_for_each_entry(h, &subsys->nsheads, entry) {
		if (nvme_ns_ids_valid(&new->ids) &&
		    nvme_ns_ids_equal(&new->ids, &h->ids))
			return -EINVAL;
	}

	return 0;
}

static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
K
Keith Busch 已提交
3777
		unsigned nsid, struct nvme_ns_ids *ids)
C
Christoph Hellwig 已提交
3778 3779
{
	struct nvme_ns_head *head;
3780
	size_t size = sizeof(*head);
C
Christoph Hellwig 已提交
3781 3782
	int ret = -ENOMEM;

3783 3784 3785 3786 3787
#ifdef CONFIG_NVME_MULTIPATH
	size += num_possible_nodes() * sizeof(struct nvme_ns *);
#endif

	head = kzalloc(size, GFP_KERNEL);
C
Christoph Hellwig 已提交
3788 3789 3790 3791 3792 3793 3794
	if (!head)
		goto out;
	ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL);
	if (ret < 0)
		goto out_free_head;
	head->instance = ret;
	INIT_LIST_HEAD(&head->list);
3795 3796 3797
	ret = init_srcu_struct(&head->srcu);
	if (ret)
		goto out_ida_remove;
C
Christoph Hellwig 已提交
3798 3799
	head->subsys = ctrl->subsys;
	head->ns_id = nsid;
3800
	head->ids = *ids;
C
Christoph Hellwig 已提交
3801 3802 3803 3804 3805 3806 3807 3808 3809
	kref_init(&head->ref);

	ret = __nvme_check_ids(ctrl->subsys, head);
	if (ret) {
		dev_err(ctrl->device,
			"duplicate IDs for nsid %d\n", nsid);
		goto out_cleanup_srcu;
	}

3810 3811 3812 3813 3814 3815 3816
	if (head->ids.csi) {
		ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects);
		if (ret)
			goto out_cleanup_srcu;
	} else
		head->effects = ctrl->effects;

3817 3818 3819 3820
	ret = nvme_mpath_alloc_disk(ctrl, head);
	if (ret)
		goto out_cleanup_srcu;

C
Christoph Hellwig 已提交
3821
	list_add_tail(&head->entry, &ctrl->subsys->nsheads);
3822 3823 3824

	kref_get(&ctrl->subsys->ref);

C
Christoph Hellwig 已提交
3825 3826 3827
	return head;
out_cleanup_srcu:
	cleanup_srcu_struct(&head->srcu);
3828
out_ida_remove:
C
Christoph Hellwig 已提交
3829 3830 3831 3832
	ida_simple_remove(&ctrl->subsys->ns_ida, head->instance);
out_free_head:
	kfree(head);
out:
3833 3834
	if (ret > 0)
		ret = blk_status_to_errno(nvme_error_status(ret));
C
Christoph Hellwig 已提交
3835 3836 3837 3838
	return ERR_PTR(ret);
}

static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
3839
		struct nvme_ns_ids *ids, bool is_shared)
C
Christoph Hellwig 已提交
3840 3841 3842 3843 3844 3845
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_ns_head *head = NULL;
	int ret = 0;

	mutex_lock(&ctrl->subsys->lock);
3846
	head = nvme_find_ns_head(ctrl->subsys, nsid);
C
Christoph Hellwig 已提交
3847
	if (!head) {
3848
		head = nvme_alloc_ns_head(ctrl, nsid, ids);
C
Christoph Hellwig 已提交
3849 3850 3851 3852
		if (IS_ERR(head)) {
			ret = PTR_ERR(head);
			goto out_unlock;
		}
3853
		head->shared = is_shared;
C
Christoph Hellwig 已提交
3854
	} else {
3855
		ret = -EINVAL;
3856
		if (!is_shared || !head->shared) {
3857
			dev_err(ctrl->device,
3858 3859
				"Duplicate unshared namespace %d\n", nsid);
			goto out_put_ns_head;
3860
		}
3861
		if (!nvme_ns_ids_equal(&head->ids, ids)) {
C
Christoph Hellwig 已提交
3862 3863 3864
			dev_err(ctrl->device,
				"IDs don't match for shared namespace %d\n",
					nsid);
3865
			goto out_put_ns_head;
C
Christoph Hellwig 已提交
3866 3867 3868
		}
	}

3869
	list_add_tail_rcu(&ns->siblings, &head->list);
C
Christoph Hellwig 已提交
3870
	ns->head = head;
3871 3872
	mutex_unlock(&ctrl->subsys->lock);
	return 0;
C
Christoph Hellwig 已提交
3873

3874 3875
out_put_ns_head:
	nvme_put_ns_head(head);
C
Christoph Hellwig 已提交
3876 3877 3878 3879 3880
out_unlock:
	mutex_unlock(&ctrl->subsys->lock);
	return ret;
}

3881 3882 3883 3884 3885
static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
	struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);

C
Christoph Hellwig 已提交
3886
	return nsa->head->ns_id - nsb->head->ns_id;
3887 3888
}

3889
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3890
{
3891
	struct nvme_ns *ns, *ret = NULL;
3892

3893
	down_read(&ctrl->namespaces_rwsem);
3894
	list_for_each_entry(ns, &ctrl->namespaces, list) {
C
Christoph Hellwig 已提交
3895
		if (ns->head->ns_id == nsid) {
3896 3897
			if (!kref_get_unless_zero(&ns->kref))
				continue;
3898 3899 3900
			ret = ns;
			break;
		}
C
Christoph Hellwig 已提交
3901
		if (ns->head->ns_id > nsid)
3902 3903
			break;
	}
3904
	up_read(&ctrl->namespaces_rwsem);
3905
	return ret;
3906
}
3907
EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);
3908

3909 3910
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
		struct nvme_ns_ids *ids)
3911 3912 3913
{
	struct nvme_ns *ns;
	struct gendisk *disk;
3914 3915
	struct nvme_id_ns *id;
	char disk_name[DISK_NAME_LEN];
3916
	int node = ctrl->numa_node, flags = GENHD_FL_EXT_DEVT;
3917

3918
	if (nvme_identify_ns(ctrl, nsid, ids, &id))
3919 3920
		return;

3921 3922
	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
	if (!ns)
3923
		goto out_free_id;
3924 3925

	ns->queue = blk_mq_init_queue(ctrl->tagset);
3926
	if (IS_ERR(ns->queue))
C
Christoph Hellwig 已提交
3927
		goto out_free_ns;
3928

3929
	if (ctrl->opts && ctrl->opts->data_digest)
3930
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
3931

3932
	blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
3933 3934 3935
	if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
		blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);

3936 3937 3938 3939
	ns->queue->queuedata = ns;
	ns->ctrl = ctrl;
	kref_init(&ns->kref);

3940
	if (nvme_init_ns_head(ns, nsid, ids, id->nmic & NVME_NS_NMIC_SHARED))
3941
		goto out_free_queue;
3942
	nvme_set_disk_name(disk_name, ns, ctrl, &flags);
3943

3944
	disk = alloc_disk_node(0, node);
3945
	if (!disk)
C
Christoph Hellwig 已提交
3946
		goto out_unlink_ns;
3947

J
Javier González 已提交
3948
	disk->fops = &nvme_bdev_ops;
3949 3950
	disk->private_data = ns;
	disk->queue = ns->queue;
3951
	disk->flags = flags;
3952 3953 3954
	memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
	ns->disk = disk;

3955
	if (nvme_update_ns_info(ns, id))
3956
		goto out_put_disk;
3957

3958
	if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
3959
		if (nvme_nvm_register(ns, disk_name, node)) {
3960 3961 3962 3963 3964
			dev_warn(ctrl->device, "LightNVM init failure\n");
			goto out_put_disk;
		}
	}

3965
	down_write(&ctrl->namespaces_rwsem);
3966
	list_add_tail(&ns->list, &ctrl->namespaces);
3967
	up_write(&ctrl->namespaces_rwsem);
3968

3969
	nvme_get_ctrl(ctrl);
3970

3971
	device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);
3972

C
Christoph Hellwig 已提交
3973
	nvme_mpath_add_disk(ns, id);
3974
	nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
C
Christoph Hellwig 已提交
3975 3976
	kfree(id);

3977
	return;
3978
 out_put_disk:
3979 3980
	/* prevent double queue cleanup */
	ns->disk->queue = NULL;
3981
	put_disk(ns->disk);
C
Christoph Hellwig 已提交
3982 3983 3984
 out_unlink_ns:
	mutex_lock(&ctrl->subsys->lock);
	list_del_rcu(&ns->siblings);
3985 3986
	if (list_empty(&ns->head->list))
		list_del_init(&ns->head->entry);
C
Christoph Hellwig 已提交
3987
	mutex_unlock(&ctrl->subsys->lock);
3988
	nvme_put_ns_head(ns->head);
3989 3990 3991 3992
 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_free_ns:
	kfree(ns);
3993 3994
 out_free_id:
	kfree(id);
3995 3996 3997 3998
}

static void nvme_ns_remove(struct nvme_ns *ns)
{
3999 4000
	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
		return;
4001

4002
	set_capacity(ns->disk, 0);
4003
	nvme_fault_inject_fini(&ns->fault_inject);
4004 4005 4006

	mutex_lock(&ns->ctrl->subsys->lock);
	list_del_rcu(&ns->siblings);
4007 4008
	if (list_empty(&ns->head->list))
		list_del_init(&ns->head->entry);
4009
	mutex_unlock(&ns->ctrl->subsys->lock);
4010

4011 4012 4013 4014
	synchronize_rcu(); /* guarantee not available in head->list */
	nvme_mpath_clear_current_path(ns);
	synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */

C
Christoph Hellwig 已提交
4015
	if (ns->disk->flags & GENHD_FL_UP) {
4016 4017
		del_gendisk(ns->disk);
		blk_cleanup_queue(ns->queue);
4018 4019
		if (blk_get_integrity(ns->disk))
			blk_integrity_unregister(ns->disk);
4020
	}
4021

4022
	down_write(&ns->ctrl->namespaces_rwsem);
4023
	list_del_init(&ns->list);
4024
	up_write(&ns->ctrl->namespaces_rwsem);
4025

4026
	nvme_mpath_check_last_path(ns);
4027 4028 4029
	nvme_put_ns(ns);
}

4030 4031 4032 4033 4034 4035 4036 4037 4038 4039
static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
{
	struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid);

	if (ns) {
		nvme_ns_remove(ns);
		nvme_put_ns(ns);
	}
}

4040
static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids)
C
Christoph Hellwig 已提交
4041 4042
{
	struct nvme_id_ns *id;
4043
	int ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
C
Christoph Hellwig 已提交
4044

4045 4046
	if (test_bit(NVME_NS_DEAD, &ns->flags))
		goto out;
C
Christoph Hellwig 已提交
4047

4048
	ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, ids, &id);
C
Christoph Hellwig 已提交
4049 4050 4051
	if (ret)
		goto out;

4052
	ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
C
Christoph Hellwig 已提交
4053
	if (!nvme_ns_ids_equal(&ns->head->ids, ids)) {
4054
		dev_err(ns->ctrl->device,
C
Christoph Hellwig 已提交
4055
			"identifiers changed for nsid %d\n", ns->head->ns_id);
4056
		goto out_free_id;
C
Christoph Hellwig 已提交
4057 4058 4059
	}

	ret = nvme_update_ns_info(ns, id);
4060 4061

out_free_id:
C
Christoph Hellwig 已提交
4062 4063 4064
	kfree(id);
out:
	/*
4065
	 * Only remove the namespace if we got a fatal error back from the
C
Christoph Hellwig 已提交
4066
	 * device, otherwise ignore the error and just move on.
4067 4068
	 *
	 * TODO: we should probably schedule a delayed retry here.
C
Christoph Hellwig 已提交
4069
	 */
4070
	if (ret > 0 && (ret & NVME_SC_DNR))
4071
		nvme_ns_remove(ns);
C
Christoph Hellwig 已提交
4072 4073
}

4074
static void nvme_validate_or_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
4075
{
4076
	struct nvme_ns_ids ids = { };
4077 4078
	struct nvme_ns *ns;

4079 4080
	if (nvme_identify_ns_descs(ctrl, nsid, &ids))
		return;
4081

4082
	ns = nvme_find_get_ns(ctrl, nsid);
4083
	if (ns) {
4084
		nvme_validate_ns(ns, &ids);
4085
		nvme_put_ns(ns);
4086 4087 4088
		return;
	}

4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099
	switch (ids.csi) {
	case NVME_CSI_NVM:
		nvme_alloc_ns(ctrl, nsid, &ids);
		break;
	case NVME_CSI_ZNS:
		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			dev_warn(ctrl->device,
				"nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
				nsid);
			break;
		}
4100 4101 4102
		if (!nvme_multi_css(ctrl)) {
			dev_warn(ctrl->device,
				"command set not reported for nsid: %d\n",
4103
				nsid);
4104 4105
			break;
		}
4106 4107 4108 4109 4110 4111 4112
		nvme_alloc_ns(ctrl, nsid, &ids);
		break;
	default:
		dev_warn(ctrl->device, "unknown csi %u for nsid %u\n",
			ids.csi, nsid);
		break;
	}
4113 4114
}

4115 4116 4117 4118
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					unsigned nsid)
{
	struct nvme_ns *ns, *next;
4119
	LIST_HEAD(rm_list);
4120

4121
	down_write(&ctrl->namespaces_rwsem);
4122
	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
4123
		if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
4124
			list_move_tail(&ns->list, &rm_list);
4125
	}
4126
	up_write(&ctrl->namespaces_rwsem);
4127 4128 4129 4130

	list_for_each_entry_safe(ns, next, &rm_list, list)
		nvme_ns_remove(ns);

4131 4132
}

4133
static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
4134
{
4135
	const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32);
4136
	__le32 *ns_list;
4137 4138
	u32 prev = 0;
	int ret = 0, i;
4139

4140 4141
	if (nvme_ctrl_limited_cns(ctrl))
		return -EOPNOTSUPP;
4142

4143
	ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
4144 4145 4146
	if (!ns_list)
		return -ENOMEM;

4147
	for (;;) {
4148 4149 4150 4151 4152 4153 4154 4155
		struct nvme_command cmd = {
			.identify.opcode	= nvme_admin_identify,
			.identify.cns		= NVME_ID_CNS_NS_ACTIVE_LIST,
			.identify.nsid		= cpu_to_le32(prev),
		};

		ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list,
					    NVME_IDENTIFY_DATA_SIZE);
4156 4157 4158
		if (ret) {
			dev_warn(ctrl->device,
				"Identify NS List failed (status=0x%x)\n", ret);
4159
			goto free;
4160
		}
4161

4162
		for (i = 0; i < nr_entries; i++) {
4163
			u32 nsid = le32_to_cpu(ns_list[i]);
4164

4165 4166
			if (!nsid)	/* end of the list? */
				goto out;
4167
			nvme_validate_or_alloc_ns(ctrl, nsid);
4168 4169
			while (++prev < nsid)
				nvme_ns_remove_by_nsid(ctrl, prev);
4170 4171 4172
		}
	}
 out:
4173 4174
	nvme_remove_invalid_namespaces(ctrl, prev);
 free:
4175 4176 4177 4178
	kfree(ns_list);
	return ret;
}

4179
static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
4180
{
4181 4182 4183 4184 4185 4186 4187
	struct nvme_id_ctrl *id;
	u32 nn, i;

	if (nvme_identify_ctrl(ctrl, &id))
		return;
	nn = le32_to_cpu(id->nn);
	kfree(id);
4188

4189
	for (i = 1; i <= nn; i++)
4190
		nvme_validate_or_alloc_ns(ctrl, i);
4191

4192
	nvme_remove_invalid_namespaces(ctrl, nn);
4193 4194
}

4195
static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
4196 4197 4198
{
	size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
	__le32 *log;
4199
	int error;
4200 4201 4202

	log = kzalloc(log_size, GFP_KERNEL);
	if (!log)
4203
		return;
4204

4205 4206 4207 4208 4209 4210
	/*
	 * We need to read the log to clear the AEN, but we don't want to rely
	 * on it for the changed namespace information as userspace could have
	 * raced with us in reading the log page, which could cause us to miss
	 * updates.
	 */
4211 4212
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0,
			NVME_CSI_NVM, log, log_size, 0);
4213
	if (error)
4214 4215 4216 4217 4218 4219
		dev_warn(ctrl->device,
			"reading changed ns log failed: %d\n", error);

	kfree(log);
}

4220
static void nvme_scan_work(struct work_struct *work)
4221
{
4222 4223
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, scan_work);
4224

K
Keith Busch 已提交
4225 4226
	/* No tagset on a live ctrl means IO queues could not created */
	if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
4227 4228
		return;

D
Dan Carpenter 已提交
4229
	if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
4230
		dev_info(ctrl->device, "rescanning namespaces.\n");
4231
		nvme_clear_changed_ns_log(ctrl);
4232 4233
	}

4234
	mutex_lock(&ctrl->scan_lock);
4235 4236
	if (nvme_scan_ns_list(ctrl) != 0)
		nvme_scan_ns_sequential(ctrl);
4237
	mutex_unlock(&ctrl->scan_lock);
4238

4239
	down_write(&ctrl->namespaces_rwsem);
4240
	list_sort(NULL, &ctrl->namespaces, ns_cmp);
4241
	up_write(&ctrl->namespaces_rwsem);
4242
}
4243

4244 4245 4246 4247 4248
/*
 * This function iterates the namespace list unlocked to allow recovery from
 * controller failure. It is up to the caller to ensure the namespace list is
 * not modified by scan work while this function is executing.
 */
4249 4250 4251
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns, *next;
4252
	LIST_HEAD(ns_list);
4253

4254 4255 4256 4257 4258 4259 4260
	/*
	 * make sure to requeue I/O to all namespaces as these
	 * might result from the scan itself and must complete
	 * for the scan_work to make progress
	 */
	nvme_mpath_clear_ctrl_paths(ctrl);

4261 4262 4263
	/* prevent racing with ns scanning */
	flush_work(&ctrl->scan_work);

4264 4265 4266 4267 4268 4269 4270 4271 4272
	/*
	 * The dead states indicates the controller was not gracefully
	 * disconnected. In that case, we won't be able to flush any data while
	 * removing the namespaces' disks; fail all the queues now to avoid
	 * potentially having to clean up the failed sync later.
	 */
	if (ctrl->state == NVME_CTRL_DEAD)
		nvme_kill_queues(ctrl);

4273 4274 4275
	/* this is a no-op when called from the controller reset handler */
	nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);

4276
	down_write(&ctrl->namespaces_rwsem);
4277
	list_splice_init(&ctrl->namespaces, &ns_list);
4278
	up_write(&ctrl->namespaces_rwsem);
4279 4280

	list_for_each_entry_safe(ns, next, &ns_list, list)
4281 4282
		nvme_ns_remove(ns);
}
4283
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
4284

4285 4286 4287 4288 4289 4290 4291 4292 4293 4294 4295 4296 4297 4298 4299 4300 4301 4302 4303 4304 4305 4306 4307 4308 4309 4310 4311
static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct nvme_ctrl *ctrl =
		container_of(dev, struct nvme_ctrl, ctrl_device);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ret;

	ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name);
	if (ret)
		return ret;

	if (opts) {
		ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr);
		if (ret)
			return ret;

		ret = add_uevent_var(env, "NVME_TRSVCID=%s",
				opts->trsvcid ?: "none");
		if (ret)
			return ret;

		ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s",
				opts->host_traddr ?: "none");
	}
	return ret;
}

4312 4313 4314 4315 4316 4317 4318 4319 4320 4321 4322 4323 4324 4325 4326 4327
static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
{
	char *envp[2] = { NULL, NULL };
	u32 aen_result = ctrl->aen_result;

	ctrl->aen_result = 0;
	if (!aen_result)
		return;

	envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
	if (!envp[0])
		return;
	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
	kfree(envp[0]);
}

4328 4329 4330 4331 4332
static void nvme_async_event_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, async_event_work);

4333
	nvme_aen_uevent(ctrl);
4334
	ctrl->ops->submit_async_event(ctrl);
4335 4336
}

4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353 4354 4355 4356 4357 4358
static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
{

	u32 csts;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
		return false;

	if (csts == ~0)
		return false;

	return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
}

static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
{
	struct nvme_fw_slot_info_log *log;

	log = kmalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return;

4359 4360
	if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM,
			log, sizeof(*log), 0))
4361
		dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
4362 4363 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4378 4379 4380 4381 4382
	kfree(log);
}

static void nvme_fw_act_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work,
				struct nvme_ctrl, fw_act_work);
	unsigned long fw_act_timeout;

	if (ctrl->mtfa)
		fw_act_timeout = jiffies +
				msecs_to_jiffies(ctrl->mtfa * 100);
	else
		fw_act_timeout = jiffies +
				msecs_to_jiffies(admin_timeout * 1000);

	nvme_stop_queues(ctrl);
	while (nvme_ctrl_pp_status(ctrl)) {
		if (time_after(jiffies, fw_act_timeout)) {
			dev_warn(ctrl->device,
				"Fw activation timeout, reset controller\n");
4383 4384
			nvme_try_sched_reset(ctrl);
			return;
4385 4386 4387 4388
		}
		msleep(100);
	}

4389
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
4390 4391 4392
		return;

	nvme_start_queues(ctrl);
4393
	/* read FW slot information to clear the AER */
4394 4395 4396
	nvme_get_fw_slot_info(ctrl);
}

4397 4398
static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
4399 4400
	u32 aer_notice_type = (result & 0xff00) >> 8;

4401 4402
	trace_nvme_async_event(ctrl, aer_notice_type);

4403
	switch (aer_notice_type) {
4404
	case NVME_AER_NOTICE_NS_CHANGED:
D
Dan Carpenter 已提交
4405
		set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
4406 4407 4408
		nvme_queue_scan(ctrl);
		break;
	case NVME_AER_NOTICE_FW_ACT_STARTING:
4409 4410 4411 4412 4413 4414 4415
		/*
		 * We are (ab)using the RESETTING state to prevent subsequent
		 * recovery actions from interfering with the controller's
		 * firmware activation.
		 */
		if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
			queue_work(nvme_wq, &ctrl->fw_act_work);
4416
		break;
C
Christoph Hellwig 已提交
4417 4418 4419 4420 4421 4422 4423
#ifdef CONFIG_NVME_MULTIPATH
	case NVME_AER_NOTICE_ANA:
		if (!ctrl->ana_log_buf)
			break;
		queue_work(nvme_wq, &ctrl->ana_work);
		break;
#endif
4424 4425 4426
	case NVME_AER_NOTICE_DISC_CHANGED:
		ctrl->aen_result = result;
		break;
4427 4428 4429 4430 4431
	default:
		dev_warn(ctrl->device, "async event result %08x\n", result);
	}
}

4432
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
4433
		volatile union nvme_result *res)
4434
{
4435
	u32 result = le32_to_cpu(res->u32);
4436
	u32 aer_type = result & 0x07;
4437

4438
	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
4439 4440
		return;

4441
	switch (aer_type) {
4442 4443 4444
	case NVME_AER_NOTICE:
		nvme_handle_aen_notice(ctrl, result);
		break;
4445 4446 4447 4448
	case NVME_AER_ERROR:
	case NVME_AER_SMART:
	case NVME_AER_CSS:
	case NVME_AER_VS:
4449
		trace_nvme_async_event(ctrl, aer_type);
4450
		ctrl->aen_result = result;
4451 4452 4453
		break;
	default:
		break;
4454
	}
4455
	queue_work(nvme_wq, &ctrl->async_event_work);
4456 4457
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);
4458

4459
void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
4460
{
C
Christoph Hellwig 已提交
4461
	nvme_mpath_stop(ctrl);
4462
	nvme_stop_keep_alive(ctrl);
4463
	nvme_stop_failfast_work(ctrl);
4464
	flush_work(&ctrl->async_event_work);
4465
	cancel_work_sync(&ctrl->fw_act_work);
4466 4467 4468 4469 4470
}
EXPORT_SYMBOL_GPL(nvme_stop_ctrl);

void nvme_start_ctrl(struct nvme_ctrl *ctrl)
{
4471
	nvme_start_keep_alive(ctrl);
4472

4473 4474
	nvme_enable_aen(ctrl);

4475 4476 4477 4478 4479 4480
	if (ctrl->queue_count > 1) {
		nvme_queue_scan(ctrl);
		nvme_start_queues(ctrl);
	}
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);
4481

4482 4483
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
4484
	nvme_hwmon_exit(ctrl);
4485
	nvme_fault_inject_fini(&ctrl->fault_inject);
4486
	dev_pm_qos_hide_latency_tolerance(ctrl->device);
4487
	cdev_device_del(&ctrl->cdev, ctrl->device);
4488
	nvme_put_ctrl(ctrl);
4489
}
4490
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
4491

4492 4493 4494 4495 4496
static void nvme_free_cels(struct nvme_ctrl *ctrl)
{
	struct nvme_effects_log	*cel;
	unsigned long i;

4497
	xa_for_each(&ctrl->cels, i, cel) {
4498 4499 4500 4501 4502 4503 4504
		xa_erase(&ctrl->cels, i);
		kfree(cel);
	}

	xa_destroy(&ctrl->cels);
}

4505
static void nvme_free_ctrl(struct device *dev)
4506
{
4507 4508
	struct nvme_ctrl *ctrl =
		container_of(dev, struct nvme_ctrl, ctrl_device);
C
Christoph Hellwig 已提交
4509
	struct nvme_subsystem *subsys = ctrl->subsys;
4510

K
Keith Busch 已提交
4511
	if (!subsys || ctrl->instance != subsys->instance)
4512 4513
		ida_simple_remove(&nvme_instance_ida, ctrl->instance);

4514
	nvme_free_cels(ctrl);
C
Christoph Hellwig 已提交
4515
	nvme_mpath_uninit(ctrl);
S
Sagi Grimberg 已提交
4516
	__free_page(ctrl->discard_page);
4517

C
Christoph Hellwig 已提交
4518
	if (subsys) {
4519
		mutex_lock(&nvme_subsystems_lock);
C
Christoph Hellwig 已提交
4520 4521
		list_del(&ctrl->subsys_entry);
		sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
4522
		mutex_unlock(&nvme_subsystems_lock);
C
Christoph Hellwig 已提交
4523
	}
4524 4525 4526

	ctrl->ops->free_ctrl(ctrl);

C
Christoph Hellwig 已提交
4527 4528
	if (subsys)
		nvme_put_subsystem(subsys);
4529 4530 4531 4532 4533 4534 4535 4536 4537 4538 4539 4540
}

/*
 * Initialize a NVMe controller structures.  This needs to be called during
 * earliest initialization so that we have the initialized structured around
 * during probing.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

4541
	ctrl->state = NVME_CTRL_NEW;
4542
	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
4543
	spin_lock_init(&ctrl->lock);
4544
	mutex_init(&ctrl->scan_lock);
4545
	INIT_LIST_HEAD(&ctrl->namespaces);
4546
	xa_init(&ctrl->cels);
4547
	init_rwsem(&ctrl->namespaces_rwsem);
4548 4549 4550
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;
4551
	ctrl->numa_node = NUMA_NO_NODE;
4552
	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
4553
	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
4554
	INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
4555
	INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
4556
	init_waitqueue_head(&ctrl->state_wq);
4557

4558
	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
4559
	INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work);
4560 4561 4562
	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;

4563 4564 4565 4566 4567 4568 4569 4570
	BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
			PAGE_SIZE);
	ctrl->discard_page = alloc_page(GFP_KERNEL);
	if (!ctrl->discard_page) {
		ret = -ENOMEM;
		goto out;
	}

4571 4572
	ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
	if (ret < 0)
4573
		goto out;
4574
	ctrl->instance = ret;
4575

4576 4577
	device_initialize(&ctrl->ctrl_device);
	ctrl->device = &ctrl->ctrl_device;
4578 4579
	ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt),
			ctrl->instance);
4580 4581 4582 4583 4584 4585 4586
	ctrl->device->class = nvme_class;
	ctrl->device->parent = ctrl->dev;
	ctrl->device->groups = nvme_dev_attr_groups;
	ctrl->device->release = nvme_free_ctrl;
	dev_set_drvdata(ctrl->device, ctrl);
	ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
	if (ret)
4587 4588
		goto out_release_instance;

4589
	nvme_get_ctrl(ctrl);
4590 4591 4592
	cdev_init(&ctrl->cdev, &nvme_dev_fops);
	ctrl->cdev.owner = ops->module;
	ret = cdev_device_add(&ctrl->cdev, ctrl->device);
4593 4594
	if (ret)
		goto out_free_name;
4595

4596 4597 4598 4599 4600 4601 4602 4603
	/*
	 * Initialize latency tolerance controls.  The sysfs files won't
	 * be visible to userspace unless the device actually supports APST.
	 */
	ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
		min(default_ps_max_latency_us, (unsigned long)S32_MAX));

4604 4605
	nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));

4606
	return 0;
4607
out_free_name:
4608
	nvme_put_ctrl(ctrl);
4609
	kfree_const(ctrl->device->kobj.name);
4610
out_release_instance:
4611
	ida_simple_remove(&nvme_instance_ida, ctrl->instance);
4612
out:
4613 4614
	if (ctrl->discard_page)
		__free_page(ctrl->discard_page);
4615 4616
	return ret;
}
4617
EXPORT_SYMBOL_GPL(nvme_init_ctrl);
4618

4619 4620 4621 4622 4623 4624 4625 4626 4627 4628 4629
/**
 * nvme_kill_queues(): Ends all namespace queues
 * @ctrl: the dead controller that needs to end
 *
 * Call this function when the driver determines it is unable to get the
 * controller in a state capable of servicing IO.
 */
void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

4630
	down_read(&ctrl->namespaces_rwsem);
M
Ming Lei 已提交
4631

4632
	/* Forcibly unquiesce queues to avoid blocking dispatch */
I
Igor Konopko 已提交
4633
	if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
4634
		blk_mq_unquiesce_queue(ctrl->admin_q);
4635

4636 4637
	list_for_each_entry(ns, &ctrl->namespaces, list)
		nvme_set_queue_dying(ns);
4638

4639
	up_read(&ctrl->namespaces_rwsem);
4640
}
4641
EXPORT_SYMBOL_GPL(nvme_kill_queues);
4642

K
Keith Busch 已提交
4643 4644 4645 4646
void nvme_unfreeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

4647
	down_read(&ctrl->namespaces_rwsem);
K
Keith Busch 已提交
4648 4649
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unfreeze_queue(ns->queue);
4650
	up_read(&ctrl->namespaces_rwsem);
K
Keith Busch 已提交
4651 4652 4653
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);

4654
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
K
Keith Busch 已提交
4655 4656 4657
{
	struct nvme_ns *ns;

4658
	down_read(&ctrl->namespaces_rwsem);
K
Keith Busch 已提交
4659 4660 4661 4662 4663
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
		if (timeout <= 0)
			break;
	}
4664
	up_read(&ctrl->namespaces_rwsem);
4665
	return timeout;
K
Keith Busch 已提交
4666 4667 4668 4669 4670 4671 4672
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);

void nvme_wait_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

4673
	down_read(&ctrl->namespaces_rwsem);
K
Keith Busch 已提交
4674 4675
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_freeze_queue_wait(ns->queue);
4676
	up_read(&ctrl->namespaces_rwsem);
K
Keith Busch 已提交
4677 4678 4679 4680 4681 4682 4683
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze);

void nvme_start_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

4684
	down_read(&ctrl->namespaces_rwsem);
K
Keith Busch 已提交
4685
	list_for_each_entry(ns, &ctrl->namespaces, list)
4686
		blk_freeze_queue_start(ns->queue);
4687
	up_read(&ctrl->namespaces_rwsem);
K
Keith Busch 已提交
4688 4689 4690
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);

4691
void nvme_stop_queues(struct nvme_ctrl *ctrl)
4692 4693 4694
{
	struct nvme_ns *ns;

4695
	down_read(&ctrl->namespaces_rwsem);
4696
	list_for_each_entry(ns, &ctrl->namespaces, list)
4697
		blk_mq_quiesce_queue(ns->queue);
4698
	up_read(&ctrl->namespaces_rwsem);
4699
}
4700
EXPORT_SYMBOL_GPL(nvme_stop_queues);
4701

4702
void nvme_start_queues(struct nvme_ctrl *ctrl)
4703 4704 4705
{
	struct nvme_ns *ns;

4706
	down_read(&ctrl->namespaces_rwsem);
4707
	list_for_each_entry(ns, &ctrl->namespaces, list)
4708
		blk_mq_unquiesce_queue(ns->queue);
4709
	up_read(&ctrl->namespaces_rwsem);
4710
}
4711
EXPORT_SYMBOL_GPL(nvme_start_queues);
4712

C
Chao Leng 已提交
4713
void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
K
Keith Busch 已提交
4714 4715 4716 4717 4718 4719 4720
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_sync_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
C
Chao Leng 已提交
4721 4722
}
EXPORT_SYMBOL_GPL(nvme_sync_io_queues);
4723

C
Chao Leng 已提交
4724 4725 4726
void nvme_sync_queues(struct nvme_ctrl *ctrl)
{
	nvme_sync_io_queues(ctrl);
4727 4728
	if (ctrl->admin_q)
		blk_sync_queue(ctrl->admin_q);
K
Keith Busch 已提交
4729 4730 4731
}
EXPORT_SYMBOL_GPL(nvme_sync_queues);

4732
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file)
4733
{
4734 4735 4736
	if (file->f_op != &nvme_dev_fops)
		return NULL;
	return file->private_data;
4737
}
4738
EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, NVME_TARGET_PASSTHRU);
4739

4740 4741 4742 4743 4744 4745 4746 4747 4748 4749 4750 4751 4752 4753 4754 4755 4756 4757
/*
 * Check we didn't inadvertently grow the command structure sizes:
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
K
Keith Busch 已提交
4758 4759
	BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE);
4760 4761 4762 4763 4764 4765 4766
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
}


4767
static int __init nvme_core_init(void)
4768
{
4769
	int result = -ENOMEM;
4770

4771 4772
	_nvme_check_size();

4773 4774 4775
	nvme_wq = alloc_workqueue("nvme-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_wq)
4776 4777 4778 4779 4780 4781 4782 4783 4784 4785 4786
		goto out;

	nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_reset_wq)
		goto destroy_wq;

	nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_delete_wq)
		goto destroy_reset_wq;
4787

4788 4789
	result = alloc_chrdev_region(&nvme_ctrl_base_chr_devt, 0,
			NVME_MINORS, "nvme");
4790
	if (result < 0)
4791
		goto destroy_delete_wq;
4792 4793 4794 4795 4796 4797

	nvme_class = class_create(THIS_MODULE, "nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}
4798
	nvme_class->dev_uevent = nvme_class_uevent;
4799

C
Christoph Hellwig 已提交
4800 4801 4802 4803 4804
	nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem");
	if (IS_ERR(nvme_subsys_class)) {
		result = PTR_ERR(nvme_subsys_class);
		goto destroy_class;
	}
4805
	return 0;
4806

C
Christoph Hellwig 已提交
4807 4808
destroy_class:
	class_destroy(nvme_class);
4809
unregister_chrdev:
4810
	unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
4811 4812 4813 4814
destroy_delete_wq:
	destroy_workqueue(nvme_delete_wq);
destroy_reset_wq:
	destroy_workqueue(nvme_reset_wq);
4815 4816
destroy_wq:
	destroy_workqueue(nvme_wq);
4817
out:
4818
	return result;
4819 4820
}

4821
static void __exit nvme_core_exit(void)
4822
{
C
Christoph Hellwig 已提交
4823
	class_destroy(nvme_subsys_class);
4824
	class_destroy(nvme_class);
4825
	unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
4826 4827
	destroy_workqueue(nvme_delete_wq);
	destroy_workqueue(nvme_reset_wq);
4828
	destroy_workqueue(nvme_wq);
M
Max Gurtovoy 已提交
4829
	ida_destroy(&nvme_instance_ida);
4830
}
4831 4832 4833 4834 4835

MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_core_init);
module_exit(nvme_core_exit);