core.c 120.8 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0
2 3 4 5 6 7 8
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
9
#include <linux/compat.h>
10
#include <linux/delay.h>
11
#include <linux/errno.h>
12
#include <linux/hdreg.h>
13
#include <linux/kernel.h>
14
#include <linux/module.h>
15
#include <linux/backing-dev.h>
16
#include <linux/list_sort.h>
17 18
#include <linux/slab.h>
#include <linux/types.h>
19 20 21
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
22
#include <linux/pm_qos.h>
23
#include <asm/unaligned.h>
24 25

#include "nvme.h"
S
Sagi Grimberg 已提交
26
#include "fabrics.h"
27

H
Hannes Reinecke 已提交
28 29 30
#define CREATE_TRACE_POINTS
#include "trace.h"

31 32
#define NVME_MINORS		(1U << MINORBITS)

33 34
unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
35
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
36
EXPORT_SYMBOL_GPL(admin_timeout);
37

38 39
unsigned int nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
40
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
41
EXPORT_SYMBOL_GPL(nvme_io_timeout);
42

43
static unsigned char shutdown_timeout = 5;
44 45 46
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

47 48
static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
K
Keith Busch 已提交
49
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
50

51
static unsigned long default_ps_max_latency_us = 100000;
52 53 54 55
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

56 57 58 59
static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

60 61 62 63
static bool streams;
module_param(streams, bool, 0644);
MODULE_PARM_DESC(streams, "turn on support for Streams write directives");

64 65 66 67 68
/*
 * nvme_wq - hosts nvme related works that are not reset or delete
 * nvme_reset_wq - hosts nvme reset works
 * nvme_delete_wq - hosts nvme delete works
 *
69 70
 * nvme_wq will host works such as scan, aen handling, fw activation,
 * keep-alive, periodic reconnects etc. nvme_reset_wq
71 72 73 74
 * runs reset works which also flush works hosted on nvme_wq for
 * serialization purposes. nvme_delete_wq host controller deletion
 * works which flush reset works for serialization.
 */
75 76 77
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

78 79 80 81 82 83
struct workqueue_struct *nvme_reset_wq;
EXPORT_SYMBOL_GPL(nvme_reset_wq);

struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);

C
Christoph Hellwig 已提交
84 85
static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);
86

87
static DEFINE_IDA(nvme_instance_ida);
88
static dev_t nvme_chr_devt;
89
static struct class *nvme_class;
C
Christoph Hellwig 已提交
90
static struct class *nvme_subsys_class;
91

K
Keith Busch 已提交
92
static int _nvme_revalidate_disk(struct gendisk *disk);
93
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
94 95 96 97 98 99 100 101 102
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					   unsigned nsid);

static void nvme_set_queue_dying(struct nvme_ns *ns)
{
	/*
	 * Revalidating a dead namespace sets capacity to 0. This will end
	 * buffered writers dirtying pages that can't be synced.
	 */
C
Christoph Hellwig 已提交
103
	if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
104 105 106 107
		return;
	blk_set_queue_dying(ns->queue);
	/* Forcibly unquiesce queues to avoid blocking dispatch */
	blk_mq_unquiesce_queue(ns->queue);
108 109 110 111
	/*
	 * Revalidate after unblocking dispatchers that may be holding bd_butex
	 */
	revalidate_disk(ns->disk);
112
}
113

114 115 116 117 118
static void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Only new queue scan work when admin and IO queues are both alive
	 */
K
Keith Busch 已提交
119
	if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
120 121 122
		queue_work(nvme_wq, &ctrl->scan_work);
}

123 124 125 126 127 128
/*
 * Use this function to proceed with scheduling reset_work for a controller
 * that had previously been set to the resetting state. This is intended for
 * code paths that can't be interrupted by other reset attempts. A hot removal
 * may prevent this from succeeding.
 */
129
int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
130 131 132 133 134 135 136
{
	if (ctrl->state != NVME_CTRL_RESETTING)
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
137
EXPORT_SYMBOL_GPL(nvme_try_sched_reset);
138

139 140 141 142
int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
143
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
144 145 146 147 148
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);

S
Sagi Grimberg 已提交
149
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
150 151 152 153
{
	int ret;

	ret = nvme_reset_ctrl(ctrl);
154
	if (!ret) {
155
		flush_work(&ctrl->reset_work);
K
Keith Busch 已提交
156
		if (ctrl->state != NVME_CTRL_LIVE)
157 158 159
			ret = -ENETRESET;
	}

160 161
	return ret;
}
S
Sagi Grimberg 已提交
162
EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
163

164
static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
165
{
166 167 168
	dev_info(ctrl->device,
		 "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);

169
	flush_work(&ctrl->reset_work);
170 171
	nvme_stop_ctrl(ctrl);
	nvme_remove_namespaces(ctrl);
172
	ctrl->ops->delete_ctrl(ctrl);
173
	nvme_uninit_ctrl(ctrl);
174 175
}

176 177 178 179 180 181 182 183
static void nvme_delete_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, delete_work);

	nvme_do_delete_ctrl(ctrl);
}

184 185 186 187
int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		return -EBUSY;
188
	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
189 190 191 192 193
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);

194
static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
195 196
{
	/*
197 198
	 * Keep a reference until nvme_do_delete_ctrl() complete,
	 * since ->delete_ctrl can free the controller.
199 200
	 */
	nvme_get_ctrl(ctrl);
201
	if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
202
		nvme_do_delete_ctrl(ctrl);
203 204 205
	nvme_put_ctrl(ctrl);
}

206
static blk_status_t nvme_error_status(u16 status)
207
{
208
	switch (status & 0x7ff) {
209
	case NVME_SC_SUCCESS:
210
		return BLK_STS_OK;
211
	case NVME_SC_CAP_EXCEEDED:
212
		return BLK_STS_NOSPC;
213
	case NVME_SC_LBA_RANGE:
214 215
	case NVME_SC_CMD_INTERRUPTED:
	case NVME_SC_NS_NOT_READY:
216 217
		return BLK_STS_TARGET;
	case NVME_SC_BAD_ATTRIBUTES:
218
	case NVME_SC_ONCS_NOT_SUPPORTED:
219 220 221
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
222
		return BLK_STS_NOTSUPP;
223 224 225
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
226 227
	case NVME_SC_ACCESS_DENIED:
	case NVME_SC_READ_ONLY:
228
	case NVME_SC_COMPARE_FAILED:
229
		return BLK_STS_MEDIUM;
230 231 232 233 234 235 236
	case NVME_SC_GUARD_CHECK:
	case NVME_SC_APPTAG_CHECK:
	case NVME_SC_REFTAG_CHECK:
	case NVME_SC_INVALID_PI:
		return BLK_STS_PROTECTION;
	case NVME_SC_RESERVATION_CONFLICT:
		return BLK_STS_NEXUS;
237 238
	case NVME_SC_HOST_PATH_ERROR:
		return BLK_STS_TRANSPORT;
239 240
	default:
		return BLK_STS_IOERR;
241 242 243
	}
}

244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259
static void nvme_retry_req(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	unsigned long delay = 0;
	u16 crd;

	/* The mask and shift result must be <= 3 */
	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
	if (ns && crd)
		delay = ns->ctrl->crdt[crd - 1] * 100;

	nvme_req(req)->retries++;
	blk_mq_requeue_request(req, false);
	blk_mq_delay_kick_requeue_list(req->q, delay);
}

260 261 262 263 264 265 266
enum nvme_disposition {
	COMPLETE,
	RETRY,
	FAILOVER,
};

static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
267
{
268 269
	if (likely(nvme_req(req)->status == 0))
		return COMPLETE;
270

271 272 273 274
	if (blk_noretry_request(req) ||
	    (nvme_req(req)->status & NVME_SC_DNR) ||
	    nvme_req(req)->retries >= nvme_max_retries)
		return COMPLETE;
275

276
	if (req->cmd_flags & REQ_NVME_MPATH) {
277 278
		if (nvme_is_path_error(nvme_req(req)->status) ||
		    blk_queue_dying(req->q))
279
			return FAILOVER;
280 281 282
	} else {
		if (blk_queue_dying(req->q))
			return COMPLETE;
283
	}
284

285 286 287 288 289 290
	return RETRY;
}

static inline void nvme_end_req(struct request *req)
{
	blk_status_t status = nvme_error_status(nvme_req(req)->status);
291

292 293
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    req_op(req) == REQ_OP_ZONE_APPEND)
K
Keith Busch 已提交
294 295
		req->__sector = nvme_lba_to_sect(req->q->queuedata,
			le64_to_cpu(nvme_req(req)->result.u64));
H
Hannes Reinecke 已提交
296 297

	nvme_trace_bio_complete(req, status);
298
	blk_mq_end_request(req, status);
299
}
300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320

void nvme_complete_rq(struct request *req)
{
	trace_nvme_complete_rq(req);
	nvme_cleanup_cmd(req);

	if (nvme_req(req)->ctrl->kas)
		nvme_req(req)->ctrl->comp_seen = true;

	switch (nvme_decide_disposition(req)) {
	case COMPLETE:
		nvme_end_req(req);
		return;
	case RETRY:
		nvme_retry_req(req);
		return;
	case FAILOVER:
		nvme_failover_req(req);
		return;
	}
}
321 322
EXPORT_SYMBOL_GPL(nvme_complete_rq);

323
bool nvme_cancel_request(struct request *req, void *data, bool reserved)
324 325 326 327
{
	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

328 329 330 331
	/* don't abort one completed request */
	if (blk_mq_request_completed(req))
		return true;

332
	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
333
	blk_mq_complete_request(req);
334
	return true;
335 336 337
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);

338 339 340
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
341
	enum nvme_ctrl_state old_state;
342
	unsigned long flags;
343 344
	bool changed = false;

345
	spin_lock_irqsave(&ctrl->lock, flags);
346 347

	old_state = ctrl->state;
348 349 350
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
351
		case NVME_CTRL_NEW:
352
		case NVME_CTRL_RESETTING:
353
		case NVME_CTRL_CONNECTING:
354 355 356 357 358 359 360 361 362
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
363 364 365 366 367 368 369
		case NVME_CTRL_LIVE:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
370
	case NVME_CTRL_CONNECTING:
371
		switch (old_state) {
372
		case NVME_CTRL_NEW:
373
		case NVME_CTRL_RESETTING:
374 375 376 377 378 379 380 381 382 383
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
384
		case NVME_CTRL_CONNECTING:
385 386 387 388 389 390
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
391 392 393 394 395 396 397 398 399 400
	case NVME_CTRL_DELETING_NOIO:
		switch (old_state) {
		case NVME_CTRL_DELETING:
		case NVME_CTRL_DEAD:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
401 402 403 404 405 406 407 408 409
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
410 411 412 413
	default:
		break;
	}

414
	if (changed) {
415
		ctrl->state = new_state;
416 417
		wake_up_all(&ctrl->state_wq);
	}
418

419
	spin_unlock_irqrestore(&ctrl->lock, flags);
420 421
	if (changed && ctrl->state == NVME_CTRL_LIVE)
		nvme_kick_requeue_lists(ctrl);
422 423 424 425
	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);

426 427 428 429 430 431 432 433 434 435 436 437
/*
 * Returns true for sink states that can't ever transition back to live.
 */
static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
{
	switch (ctrl->state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	case NVME_CTRL_DELETING:
438
	case NVME_CTRL_DELETING_NOIO:
439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459
	case NVME_CTRL_DEAD:
		return true;
	default:
		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
		return true;
	}
}

/*
 * Waits for the controller state to be resetting, or returns false if it is
 * not possible to ever transition to that state.
 */
bool nvme_wait_reset(struct nvme_ctrl *ctrl)
{
	wait_event(ctrl->state_wq,
		   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
		   nvme_state_terminal(ctrl));
	return ctrl->state == NVME_CTRL_RESETTING;
}
EXPORT_SYMBOL_GPL(nvme_wait_reset);

C
Christoph Hellwig 已提交
460 461 462 463 464
static void nvme_free_ns_head(struct kref *ref)
{
	struct nvme_ns_head *head =
		container_of(ref, struct nvme_ns_head, ref);

465
	nvme_mpath_remove_disk(head);
C
Christoph Hellwig 已提交
466
	ida_simple_remove(&head->subsys->ns_ida, head->instance);
467
	cleanup_srcu_struct(&head->srcu);
468
	nvme_put_subsystem(head->subsys);
C
Christoph Hellwig 已提交
469 470 471 472 473 474 475 476
	kfree(head);
}

static void nvme_put_ns_head(struct nvme_ns_head *head)
{
	kref_put(&head->ref, nvme_free_ns_head);
}

477 478 479 480
static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

481 482
	if (ns->ndev)
		nvme_nvm_unregister(ns);
483 484

	put_disk(ns->disk);
C
Christoph Hellwig 已提交
485
	nvme_put_ns_head(ns->head);
486
	nvme_put_ctrl(ns->ctrl);
487 488 489
	kfree(ns);
}

490
void nvme_put_ns(struct nvme_ns *ns)
491 492 493
{
	kref_put(&ns->kref, nvme_free_ns);
}
494
EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);
495

496 497 498 499 500 501 502 503 504
static inline void nvme_clear_nvme_request(struct request *req)
{
	if (!(req->rq_flags & RQF_DONTPREP)) {
		nvme_req(req)->retries = 0;
		nvme_req(req)->flags = 0;
		req->rq_flags |= RQF_DONTPREP;
	}
}

505
struct request *nvme_alloc_request(struct request_queue *q,
506
		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
507
{
508
	unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
509 510
	struct request *req;

511
	if (qid == NVME_QID_ANY) {
512
		req = blk_mq_alloc_request(q, op, flags);
513
	} else {
514
		req = blk_mq_alloc_request_hctx(q, op, flags,
515 516
				qid ? qid - 1 : 0);
	}
517
	if (IS_ERR(req))
518
		return req;
519 520

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
521
	nvme_clear_nvme_request(req);
522
	nvme_req(req)->cmd = cmd;
523

524 525
	return req;
}
526
EXPORT_SYMBOL_GPL(nvme_alloc_request);
527

528 529 530 531 532 533 534
static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));

	c.directive.opcode = nvme_admin_directive_send;
A
Arnav Dawn 已提交
535
	c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563
	c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
	c.directive.dtype = NVME_DIR_IDENTIFY;
	c.directive.tdtype = NVME_DIR_STREAMS;
	c.directive.endir = enable ? NVME_DIR_ENDIR : 0;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
}

static int nvme_disable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, false);
}

static int nvme_enable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, true);
}

static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
				  struct streams_directive_params *s, u32 nsid)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	memset(s, 0, sizeof(*s));

	c.directive.opcode = nvme_admin_directive_recv;
	c.directive.nsid = cpu_to_le32(nsid);
K
Keith Busch 已提交
564
	c.directive.numd = cpu_to_le32(nvme_bytes_to_numd(sizeof(*s)));
565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584
	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
	c.directive.dtype = NVME_DIR_STREAMS;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
}

static int nvme_configure_directives(struct nvme_ctrl *ctrl)
{
	struct streams_directive_params s;
	int ret;

	if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
		return 0;
	if (!streams)
		return 0;

	ret = nvme_enable_streams(ctrl);
	if (ret)
		return ret;

A
Arnav Dawn 已提交
585
	ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
586
	if (ret)
587
		goto out_disable_stream;
588 589 590 591 592

	ctrl->nssa = le16_to_cpu(s.nssa);
	if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
		dev_info(ctrl->device, "too few streams (%u) available\n",
					ctrl->nssa);
593
		goto out_disable_stream;
594 595
	}

596
	ctrl->nr_streams = min_t(u16, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
597 598
	dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
	return 0;
599 600 601 602

out_disable_stream:
	nvme_disable_streams(ctrl);
	return ret;
603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629
}

/*
 * Check if 'req' has a write hint associated with it. If it does, assign
 * a valid namespace stream to the write.
 */
static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
				     struct request *req, u16 *control,
				     u32 *dsmgmt)
{
	enum rw_hint streamid = req->write_hint;

	if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
		streamid = 0;
	else {
		streamid--;
		if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
			return;

		*control |= NVME_RW_DTYPE_STREAMS;
		*dsmgmt |= streamid << 16;
	}

	if (streamid < ARRAY_SIZE(req->q->write_hints))
		req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
}

630 631 632 633 634 635 636 637
static void nvme_setup_passthrough(struct request *req,
		struct nvme_command *cmd)
{
	memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
	/* passthru commands should let the driver set the SGL flags */
	cmd->common.flags &= ~NVME_CMD_SGL_ALL;
}

M
Ming Lin 已提交
638 639 640 641
static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	cmnd->common.opcode = nvme_cmd_flush;
C
Christoph Hellwig 已提交
642
	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
M
Ming Lin 已提交
643 644
}

645
static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
M
Ming Lin 已提交
646 647
		struct nvme_command *cmnd)
{
648
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
M
Ming Lin 已提交
649
	struct nvme_dsm_range *range;
650
	struct bio *bio;
M
Ming Lin 已提交
651

652 653 654 655 656 657 658 659
	/*
	 * Some devices do not consider the DSM 'Number of Ranges' field when
	 * determining how much data to DMA. Always allocate memory for maximum
	 * number of segments to prevent device reading beyond end of buffer.
	 */
	static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;

	range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
660 661 662 663 664 665 666 667 668 669 670
	if (!range) {
		/*
		 * If we fail allocation our range, fallback to the controller
		 * discard page. If that's also busy, it's safe to return
		 * busy, as we know we can make progress once that's freed.
		 */
		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
			return BLK_STS_RESOURCE;

		range = page_address(ns->ctrl->discard_page);
	}
M
Ming Lin 已提交
671

672
	__rq_for_each_bio(bio, req) {
673
		u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
674 675
		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

K
Keith Busch 已提交
676 677 678 679 680
		if (n < segments) {
			range[n].cattr = cpu_to_le32(0);
			range[n].nlb = cpu_to_le32(nlb);
			range[n].slba = cpu_to_le64(slba);
		}
681 682 683 684
		n++;
	}

	if (WARN_ON_ONCE(n != segments)) {
685 686 687 688
		if (virt_to_page(range) == ns->ctrl->discard_page)
			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
		else
			kfree(range);
689
		return BLK_STS_IOERR;
690
	}
M
Ming Lin 已提交
691 692

	cmnd->dsm.opcode = nvme_cmd_dsm;
C
Christoph Hellwig 已提交
693
	cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
694
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
M
Ming Lin 已提交
695 696
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

697 698
	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
699
	req->special_vec.bv_len = alloc_size;
700
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;
M
Ming Lin 已提交
701

702
	return BLK_STS_OK;
M
Ming Lin 已提交
703 704
}

705 706 707 708 709 710 711 712 713
static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		return nvme_setup_discard(ns, req, cmnd);

	cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
	cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->write_zeroes.slba =
714
		cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
715 716 717 718 719 720
	cmnd->write_zeroes.length =
		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
	cmnd->write_zeroes.control = 0;
	return BLK_STS_OK;
}

721
static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
K
Keith Busch 已提交
722 723
		struct request *req, struct nvme_command *cmnd,
		enum nvme_opcode op)
M
Ming Lin 已提交
724
{
725
	struct nvme_ctrl *ctrl = ns->ctrl;
M
Ming Lin 已提交
726 727 728 729 730 731 732 733 734 735 736
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

K
Keith Busch 已提交
737
	cmnd->rw.opcode = op;
C
Christoph Hellwig 已提交
738
	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
739
	cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
M
Ming Lin 已提交
740 741
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

742 743 744
	if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
		nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);

M
Ming Lin 已提交
745
	if (ns->ms) {
746 747 748 749 750 751 752 753 754 755 756 757
		/*
		 * If formated with metadata, the block layer always provides a
		 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.  Else
		 * we enable the PRACT bit for protection information or set the
		 * namespace capacity to zero to prevent any I/O.
		 */
		if (!blk_integrity_rq(req)) {
			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
				return BLK_STS_NOTSUPP;
			control |= NVME_RW_PRINFO_PRACT;
		}

M
Ming Lin 已提交
758 759 760 761 762 763 764 765
		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
K
Keith Busch 已提交
766 767
			if (op == nvme_cmd_zone_append)
				control |= NVME_RW_APPEND_PIREMAP;
768
			cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
M
Ming Lin 已提交
769 770 771 772 773 774
			break;
		}
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
775
	return 0;
M
Ming Lin 已提交
776 777
}

778 779 780
void nvme_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
781 782 783 784 785 786 787
		struct nvme_ns *ns = req->rq_disk->private_data;
		struct page *page = req->special_vec.bv_page;

		if (page == ns->ctrl->discard_page)
			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
		else
			kfree(page_address(page) + req->special_vec.bv_offset);
788 789 790 791
	}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);

792
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
M
Ming Lin 已提交
793 794
		struct nvme_command *cmd)
{
795
	blk_status_t ret = BLK_STS_OK;
M
Ming Lin 已提交
796

797
	nvme_clear_nvme_request(req);
798

799
	memset(cmd, 0, sizeof(*cmd));
800 801 802
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
803
		nvme_setup_passthrough(req, cmd);
804 805
		break;
	case REQ_OP_FLUSH:
M
Ming Lin 已提交
806
		nvme_setup_flush(ns, cmd);
807
		break;
K
Keith Busch 已提交
808 809 810 811 812 813 814 815 816 817 818 819 820
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_RESET:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
		break;
821
	case REQ_OP_WRITE_ZEROES:
822 823
		ret = nvme_setup_write_zeroes(ns, req, cmd);
		break;
824
	case REQ_OP_DISCARD:
M
Ming Lin 已提交
825
		ret = nvme_setup_discard(ns, req, cmd);
826 827
		break;
	case REQ_OP_READ:
K
Keith Busch 已提交
828 829
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
		break;
830
	case REQ_OP_WRITE:
K
Keith Busch 已提交
831 832 833 834
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write);
		break;
	case REQ_OP_ZONE_APPEND:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
835 836 837
		break;
	default:
		WARN_ON_ONCE(1);
838
		return BLK_STS_IOERR;
839
	}
M
Ming Lin 已提交
840

841
	cmd->common.command_id = req->tag;
K
Keith Busch 已提交
842
	trace_nvme_setup_cmd(req, cmd);
M
Ming Lin 已提交
843 844 845 846
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871
static void nvme_end_sync_rq(struct request *rq, blk_status_t error)
{
	struct completion *waiting = rq->end_io_data;

	rq->end_io_data = NULL;
	complete(waiting);
}

static void nvme_execute_rq_polled(struct request_queue *q,
		struct gendisk *bd_disk, struct request *rq, int at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	WARN_ON_ONCE(!test_bit(QUEUE_FLAG_POLL, &q->queue_flags));

	rq->cmd_flags |= REQ_HIPRI;
	rq->end_io_data = &wait;
	blk_execute_rq_nowait(q, bd_disk, rq, at_head, nvme_end_sync_rq);

	while (!completion_done(&wait)) {
		blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true);
		cond_resched();
	}
}

872 873 874 875 876
/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
877
		union nvme_result *result, void *buffer, unsigned bufflen,
878
		unsigned timeout, int qid, int at_head,
879
		blk_mq_req_flags_t flags, bool poll)
880 881 882 883
{
	struct request *req;
	int ret;

884
	req = nvme_alloc_request(q, cmd, flags, qid);
885 886 887 888 889
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

890 891 892 893
	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
894 895
	}

896 897 898 899
	if (poll)
		nvme_execute_rq_polled(req->q, NULL, req, at_head);
	else
		blk_execute_rq(req->q, NULL, req, at_head);
900 901
	if (result)
		*result = nvme_req(req)->result;
902 903 904 905
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
906 907 908 909
 out:
	blk_mq_free_request(req);
	return ret;
}
910
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
911 912 913 914

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
915
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
916
			NVME_QID_ANY, 0, 0, false);
917
}
918
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
919

920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953
static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
		unsigned len, u32 seed, bool write)
{
	struct bio_integrity_payload *bip;
	int ret = -ENOMEM;
	void *buf;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		goto out;

	ret = -EFAULT;
	if (write && copy_from_user(buf, ubuf, len))
		goto out_free_meta;

	bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
	if (IS_ERR(bip)) {
		ret = PTR_ERR(bip);
		goto out_free_meta;
	}

	bip->bip_iter.bi_size = len;
	bip->bip_iter.bi_sector = seed;
	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
			offset_in_page(buf));
	if (ret == len)
		return buf;
	ret = -ENOMEM;
out_free_meta:
	kfree(buf);
out:
	return ERR_PTR(ret);
}

954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053
static u32 nvme_known_admin_effects(u8 opcode)
{
	switch (opcode) {
	case nvme_admin_format_nvm:
		return NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
			NVME_CMD_EFFECTS_CSE_MASK;
	case nvme_admin_sanitize_nvm:
		return NVME_CMD_EFFECTS_CSE_MASK;
	default:
		break;
	}
	return 0;
}

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
{
	u32 effects = 0;

	if (ns) {
		if (ns->head->effects)
			effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
			dev_warn(ctrl->device,
				 "IO command:%02x has unhandled effects:%08x\n",
				 opcode, effects);
		return 0;
	}

	if (ctrl->effects)
		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
	effects |= nvme_known_admin_effects(opcode);

	return effects;
}
EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU);

static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			       u8 opcode)
{
	u32 effects = nvme_command_effects(ctrl, ns, opcode);

	/*
	 * For simplicity, IO to all namespaces is quiesced even if the command
	 * effects say only one namespace is affected.
	 */
	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
		mutex_lock(&ctrl->scan_lock);
		mutex_lock(&ctrl->subsys->lock);
		nvme_mpath_start_freeze(ctrl->subsys);
		nvme_mpath_wait_freeze(ctrl->subsys);
		nvme_start_freeze(ctrl);
		nvme_wait_freeze(ctrl);
	}
	return effects;
}

static void nvme_update_formats(struct nvme_ctrl *ctrl, u32 *effects)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		if (_nvme_revalidate_disk(ns->disk))
			nvme_set_queue_dying(ns);
		else if (blk_queue_is_zoned(ns->disk->queue)) {
			/*
			 * IO commands are required to fully revalidate a zoned
			 * device. Force the command effects to trigger rescan
			 * work so report zones can run in a context with
			 * unfrozen IO queues.
			 */
			*effects |= NVME_CMD_EFFECTS_NCC;
		}
	up_read(&ctrl->namespaces_rwsem);
}

static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
{
	/*
	 * Revalidate LBA changes prior to unfreezing. This is necessary to
	 * prevent memory corruption if a logical block size was changed by
	 * this command.
	 */
	if (effects & NVME_CMD_EFFECTS_LBCC)
		nvme_update_formats(ctrl, &effects);
	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
		nvme_unfreeze(ctrl);
		nvme_mpath_unfreeze(ctrl->subsys);
		mutex_unlock(&ctrl->subsys->lock);
		nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
		mutex_unlock(&ctrl->scan_lock);
	}
	if (effects & NVME_CMD_EFFECTS_CCC)
		nvme_init_identify(ctrl);
	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
		nvme_queue_scan(ctrl);
		flush_work(&ctrl->scan_work);
	}
}

1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067
void nvme_execute_passthru_rq(struct request *rq)
{
	struct nvme_command *cmd = nvme_req(rq)->cmd;
	struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
	struct nvme_ns *ns = rq->q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	u32 effects;

	effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
	blk_execute_rq(rq->q, disk, rq, 0);
	nvme_passthru_end(ctrl, effects);
}
EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);

1068
static int nvme_submit_user_cmd(struct request_queue *q,
1069 1070
		struct nvme_command *cmd, void __user *ubuffer,
		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
1071
		u32 meta_seed, u64 *result, unsigned timeout)
1072
{
1073
	bool write = nvme_is_write(cmd);
1074 1075
	struct nvme_ns *ns = q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
1076
	struct request *req;
1077 1078
	struct bio *bio = NULL;
	void *meta = NULL;
1079 1080
	int ret;

1081
	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
1082 1083 1084 1085
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
1086
	nvme_req(req)->flags |= NVME_REQ_USERCMD;
1087 1088

	if (ubuffer && bufflen) {
1089 1090 1091 1092 1093
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;
1094
		bio->bi_disk = disk;
1095 1096 1097 1098 1099
		if (disk && meta_buffer && meta_len) {
			meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
					meta_seed, write);
			if (IS_ERR(meta)) {
				ret = PTR_ERR(meta);
1100 1101
				goto out_unmap;
			}
1102
			req->cmd_flags |= REQ_INTEGRITY;
1103 1104
		}
	}
1105

1106
	nvme_execute_passthru_rq(req);
1107 1108 1109 1110
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
1111
	if (result)
1112
		*result = le64_to_cpu(nvme_req(req)->result.u64);
1113 1114 1115 1116 1117 1118
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
	kfree(meta);
 out_unmap:
1119
	if (bio)
1120
		blk_rq_unmap_user(bio);
1121 1122 1123 1124 1125
 out:
	blk_mq_free_request(req);
	return ret;
}

1126
static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
S
Sagi Grimberg 已提交
1127 1128
{
	struct nvme_ctrl *ctrl = rq->end_io_data;
1129 1130
	unsigned long flags;
	bool startka = false;
S
Sagi Grimberg 已提交
1131 1132 1133

	blk_mq_free_request(rq);

1134
	if (status) {
S
Sagi Grimberg 已提交
1135
		dev_err(ctrl->device,
1136 1137
			"failed nvme_keep_alive_end_io error=%d\n",
				status);
S
Sagi Grimberg 已提交
1138 1139 1140
		return;
	}

1141
	ctrl->comp_seen = false;
1142 1143 1144 1145 1146 1147
	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->state == NVME_CTRL_LIVE ||
	    ctrl->state == NVME_CTRL_CONNECTING)
		startka = true;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (startka)
1148
		queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
S
Sagi Grimberg 已提交
1149 1150 1151 1152 1153 1154
}

static int nvme_keep_alive(struct nvme_ctrl *ctrl)
{
	struct request *rq;

1155
	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
S
Sagi Grimberg 已提交
1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171
			NVME_QID_ANY);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->timeout = ctrl->kato * HZ;
	rq->end_io_data = ctrl;

	blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);

	return 0;
}

static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);
1172 1173 1174 1175 1176 1177
	bool comp_seen = ctrl->comp_seen;

	if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
		dev_dbg(ctrl->device,
			"reschedule traffic based keep-alive timer\n");
		ctrl->comp_seen = false;
1178
		queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
1179 1180
		return;
	}
S
Sagi Grimberg 已提交
1181 1182 1183 1184

	if (nvme_keep_alive(ctrl)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed\n");
1185
		nvme_reset_ctrl(ctrl);
S
Sagi Grimberg 已提交
1186 1187 1188 1189
		return;
	}
}

1190
static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
S
Sagi Grimberg 已提交
1191 1192 1193 1194
{
	if (unlikely(ctrl->kato == 0))
		return;

1195
	queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
S
Sagi Grimberg 已提交
1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206
}

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);

1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219
/*
 * In NVMe 1.0 the CNS field was just a binary controller or namespace
 * flag, thus sending any new CNS opcodes has a big chance of not working.
 * Qemu unfortunately had that bug after reporting a 1.1 version compliance
 * (but not for any later version).
 */
static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
{
	if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
		return ctrl->vs < NVME_VS(1, 2, 0);
	return ctrl->vs < NVME_VS(1, 1, 0);
}

K
Keith Busch 已提交
1220
static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
1221 1222 1223 1224 1225 1226
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
1227
	c.identify.cns = NVME_ID_CNS_CTRL;
1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}

1240 1241 1242 1243 1244
static bool nvme_multi_css(struct nvme_ctrl *ctrl)
{
	return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
}

1245
static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
1246
		struct nvme_ns_id_desc *cur, bool *csi_seen)
1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275
{
	const char *warn_str = "ctrl returned bogus length:";
	void *data = cur;

	switch (cur->nidt) {
	case NVME_NIDT_EUI64:
		if (cur->nidl != NVME_NIDT_EUI64_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
				 warn_str, cur->nidl);
			return -1;
		}
		memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
		return NVME_NIDT_EUI64_LEN;
	case NVME_NIDT_NGUID:
		if (cur->nidl != NVME_NIDT_NGUID_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
				 warn_str, cur->nidl);
			return -1;
		}
		memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
		return NVME_NIDT_NGUID_LEN;
	case NVME_NIDT_UUID:
		if (cur->nidl != NVME_NIDT_UUID_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
				 warn_str, cur->nidl);
			return -1;
		}
		uuid_copy(&ids->uuid, data + sizeof(*cur));
		return NVME_NIDT_UUID_LEN;
1276 1277 1278 1279 1280 1281 1282 1283 1284
	case NVME_NIDT_CSI:
		if (cur->nidl != NVME_NIDT_CSI_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n",
				 warn_str, cur->nidl);
			return -1;
		}
		memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN);
		*csi_seen = true;
		return NVME_NIDT_CSI_LEN;
1285 1286 1287 1288 1289 1290
	default:
		/* Skip unknown types */
		return cur->nidl;
	}
}

1291
static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
1292
		struct nvme_ns_ids *ids)
1293 1294
{
	struct nvme_command c = { };
1295 1296
	bool csi_seen = false;
	int status, pos, len;
1297 1298
	void *data;

1299 1300 1301
	if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
		return 0;

1302 1303 1304 1305 1306 1307 1308 1309
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

1310
	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
1311
				      NVME_IDENTIFY_DATA_SIZE);
1312 1313 1314
	if (status) {
		dev_warn(ctrl->device,
			"Identify Descriptors failed (%d)\n", status);
1315
		goto free_data;
1316
	}
1317 1318 1319 1320 1321 1322 1323

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;

1324
		len = nvme_process_ns_desc(ctrl, ids, cur, &csi_seen);
1325
		if (len < 0)
1326
			break;
1327 1328 1329

		len += sizeof(*cur);
	}
1330 1331 1332 1333 1334 1335 1336

	if (nvme_multi_css(ctrl) && !csi_seen) {
		dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
			 nsid);
		status = -EINVAL;
	}

1337 1338 1339 1340 1341
free_data:
	kfree(data);
	return status;
}

1342 1343 1344 1345 1346
static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
{
	struct nvme_command c = { };

	c.identify.opcode = nvme_admin_identify;
1347
	c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
1348
	c.identify.nsid = cpu_to_le32(nsid);
1349 1350
	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list,
				    NVME_IDENTIFY_DATA_SIZE);
1351 1352
}

1353 1354
static int nvme_identify_ns(struct nvme_ctrl *ctrl,
		unsigned nsid, struct nvme_id_ns **id)
1355 1356 1357 1358 1359
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1360 1361
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
1362
	c.identify.cns = NVME_ID_CNS_NS;
1363

1364 1365 1366
	*id = kmalloc(sizeof(**id), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;
1367

1368
	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
1369
	if (error) {
1370
		dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
1371
		kfree(*id);
1372 1373
	}

1374
	return error;
1375 1376
}

K
Keith Busch 已提交
1377 1378
static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen, u32 *result)
1379
{
1380
	union nvme_result res = { 0 };
1381
	struct nvme_command c;
1382
	int ret;
1383 1384

	memset(&c, 0, sizeof(c));
K
Keith Busch 已提交
1385
	c.features.opcode = op;
1386 1387 1388
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

1389
	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
1390
			buffer, buflen, 0, NVME_QID_ANY, 0, 0, false);
1391
	if (ret >= 0 && result)
1392
		*result = le32_to_cpu(res.u32);
1393
	return ret;
1394 1395
}

K
Keith Busch 已提交
1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_set_features);

int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_get_features);

C
Christoph Hellwig 已提交
1414 1415 1416 1417 1418 1419
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

1420
	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
C
Christoph Hellwig 已提交
1421
			&result);
1422
	if (status < 0)
C
Christoph Hellwig 已提交
1423 1424
		return status;

1425 1426 1427 1428 1429 1430
	/*
	 * Degraded controllers might return an error when setting the queue
	 * count.  We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be only way to fix them up.
	 */
	if (status > 0) {
1431
		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
1432 1433 1434 1435 1436 1437
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

C
Christoph Hellwig 已提交
1438 1439
	return 0;
}
1440
EXPORT_SYMBOL_GPL(nvme_set_queue_count);
C
Christoph Hellwig 已提交
1441

1442
#define NVME_AEN_SUPPORTED \
1443 1444
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
	 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)
1445 1446 1447

static void nvme_enable_aen(struct nvme_ctrl *ctrl)
{
1448
	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
1449 1450
	int status;

1451 1452 1453 1454 1455
	if (!supported_aens)
		return;

	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
			NULL, 0, &result);
1456 1457
	if (status)
		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
1458
			 supported_aens);
1459 1460

	queue_work(nvme_wq, &ctrl->async_event_work);
1461 1462
}

1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474
/*
 * Convert integer values from ioctl structures to user pointers, silently
 * ignoring the upper bits in the compat case to match behaviour of 32-bit
 * kernels.
 */
static void __user *nvme_to_user_ptr(uintptr_t ptrval)
{
	if (in_compat_syscall())
		ptrval = (compat_uptr_t)ptrval;
	return (void __user *)ptrval;
}

1475 1476 1477 1478 1479 1480 1481 1482 1483
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
1484 1485
	if (io.flags)
		return -EINVAL;
1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;
	meta_len = (io.nblocks + 1) * ns->ms;
1498
	metadata = nvme_to_user_ptr(io.metadata);
1499

1500
	if (ns->features & NVME_NS_EXT_LBAS) {
1501 1502 1503 1504 1505 1506 1507 1508 1509 1510
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
C
Christoph Hellwig 已提交
1511
	c.rw.nsid = cpu_to_le32(ns->head->ns_id);
1512 1513 1514 1515 1516 1517 1518 1519
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

1520
	return nvme_submit_user_cmd(ns->queue, &c,
1521
			nvme_to_user_ptr(io.addr), length,
1522
			metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
1523 1524
}

1525
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1526 1527 1528 1529 1530
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557
	u64 result;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
1558 1559 1560
			nvme_to_user_ptr(cmd.addr), cmd.data_len,
			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
			0, &result, timeout);
1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575

	if (status >= 0) {
		if (put_user(result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd64 __user *ucmd)
{
	struct nvme_passthru_cmd64 cmd;
	struct nvme_command c;
	unsigned timeout = 0;
1576 1577 1578 1579 1580 1581
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
1582 1583
	if (cmd.flags)
		return -EINVAL;
1584 1585 1586 1587 1588 1589 1590

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
1591 1592 1593 1594 1595 1596
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);
1597 1598 1599 1600 1601

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
1602 1603
			nvme_to_user_ptr(cmd.addr), cmd.data_len,
			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
1604
			0, &cmd.result, timeout);
1605

1606 1607 1608 1609 1610 1611 1612 1613
	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

1614 1615 1616 1617
/*
 * Issue ioctl requests on the first available path.  Note that unlike normal
 * block layer requests we will not retry failed request on another controller.
 */
K
Keith Busch 已提交
1618
struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
1619
		struct nvme_ns_head **head, int *srcu_idx)
1620
{
1621 1622
#ifdef CONFIG_NVME_MULTIPATH
	if (disk->fops == &nvme_ns_head_ops) {
1623 1624
		struct nvme_ns *ns;

1625 1626
		*head = disk->private_data;
		*srcu_idx = srcu_read_lock(&(*head)->srcu);
1627 1628 1629 1630
		ns = nvme_find_path(*head);
		if (!ns)
			srcu_read_unlock(&(*head)->srcu, *srcu_idx);
		return ns;
1631 1632 1633 1634 1635 1636
	}
#endif
	*head = NULL;
	*srcu_idx = -1;
	return disk->private_data;
}
1637

K
Keith Busch 已提交
1638
void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
1639 1640 1641 1642
{
	if (head)
		srcu_read_unlock(&head->srcu, idx);
}
1643

1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678
static bool is_ctrl_ioctl(unsigned int cmd)
{
	if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
		return true;
	if (is_sed_ioctl(cmd))
		return true;
	return false;
}

static int nvme_handle_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
				  void __user *argp,
				  struct nvme_ns_head *head,
				  int srcu_idx)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	int ret;

	nvme_get_ctrl(ns->ctrl);
	nvme_put_ns_from_disk(head, srcu_idx);

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		ret = nvme_user_cmd(ctrl, NULL, argp);
		break;
	case NVME_IOCTL_ADMIN64_CMD:
		ret = nvme_user_cmd64(ctrl, NULL, argp);
		break;
	default:
		ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
		break;
	}
	nvme_put_ctrl(ctrl);
	return ret;
}

1679 1680
static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
1681
{
1682
	struct nvme_ns_head *head = NULL;
1683
	void __user *argp = (void __user *)arg;
1684 1685 1686 1687 1688
	struct nvme_ns *ns;
	int srcu_idx, ret;

	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
	if (unlikely(!ns))
1689 1690
		return -EWOULDBLOCK;

1691 1692 1693 1694 1695
	/*
	 * Handle ioctls that apply to the controller instead of the namespace
	 * seperately and drop the ns SRCU reference early.  This avoids a
	 * deadlock when deleting namespaces using the passthrough interface.
	 */
1696 1697
	if (is_ctrl_ioctl(cmd))
		return nvme_handle_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
1698

1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709
	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		ret = ns->head->ns_id;
		break;
	case NVME_IOCTL_IO_CMD:
		ret = nvme_user_cmd(ns->ctrl, ns, argp);
		break;
	case NVME_IOCTL_SUBMIT_IO:
		ret = nvme_submit_io(ns, argp);
		break;
1710 1711 1712
	case NVME_IOCTL_IO64_CMD:
		ret = nvme_user_cmd64(ns->ctrl, ns, argp);
		break;
1713 1714 1715 1716 1717 1718 1719
	default:
		if (ns->ndev)
			ret = nvme_nvm_ioctl(ns, cmd, arg);
		else
			ret = -ENOTTY;
	}

1720 1721
	nvme_put_ns_from_disk(head, srcu_idx);
	return ret;
1722 1723
}

1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764
#ifdef CONFIG_COMPAT
struct nvme_user_io32 {
	__u8	opcode;
	__u8	flags;
	__u16	control;
	__u16	nblocks;
	__u16	rsvd;
	__u64	metadata;
	__u64	addr;
	__u64	slba;
	__u32	dsmgmt;
	__u32	reftag;
	__u16	apptag;
	__u16	appmask;
} __attribute__((__packed__));

#define NVME_IOCTL_SUBMIT_IO32	_IOW('N', 0x42, struct nvme_user_io32)

static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	/*
	 * Corresponds to the difference of NVME_IOCTL_SUBMIT_IO
	 * between 32 bit programs and 64 bit kernel.
	 * The cause is that the results of sizeof(struct nvme_user_io),
	 * which is used to define NVME_IOCTL_SUBMIT_IO,
	 * are not same between 32 bit compiler and 64 bit compiler.
	 * NVME_IOCTL_SUBMIT_IO32 is for 64 bit kernel handling
	 * NVME_IOCTL_SUBMIT_IO issued from 32 bit programs.
	 * Other IOCTL numbers are same between 32 bit and 64 bit.
	 * So there is nothing to do regarding to other IOCTL numbers.
	 */
	if (cmd == NVME_IOCTL_SUBMIT_IO32)
		return nvme_ioctl(bdev, mode, NVME_IOCTL_SUBMIT_IO, arg);

	return nvme_ioctl(bdev, mode, cmd, arg);
}
#else
#define nvme_compat_ioctl	NULL
#endif /* CONFIG_COMPAT */

1765 1766
static int nvme_open(struct block_device *bdev, fmode_t mode)
{
C
Christoph Hellwig 已提交
1767 1768
	struct nvme_ns *ns = bdev->bd_disk->private_data;

1769 1770 1771
#ifdef CONFIG_NVME_MULTIPATH
	/* should never be called due to GENHD_FL_HIDDEN */
	if (WARN_ON_ONCE(ns->head->disk))
1772
		goto fail;
1773
#endif
C
Christoph Hellwig 已提交
1774
	if (!kref_get_unless_zero(&ns->kref))
1775 1776 1777 1778
		goto fail;
	if (!try_module_get(ns->ctrl->ops->module))
		goto fail_put_ns;

C
Christoph Hellwig 已提交
1779
	return 0;
1780 1781 1782 1783 1784

fail_put_ns:
	nvme_put_ns(ns);
fail:
	return -ENXIO;
1785 1786 1787 1788
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
1789 1790 1791 1792
	struct nvme_ns *ns = disk->private_data;

	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804
}

static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
1805 1806
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
				u32 max_integrity_segments)
1807 1808 1809
{
	struct blk_integrity integrity;

1810
	memset(&integrity, 0, sizeof(integrity));
1811
	switch (pi_type) {
1812 1813
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
1814 1815
		integrity.tag_size = sizeof(u16) + sizeof(u32);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1816 1817 1818 1819
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
1820 1821
		integrity.tag_size = sizeof(u16);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1822 1823 1824 1825 1826
		break;
	default:
		integrity.profile = NULL;
		break;
	}
1827 1828
	integrity.tuple_size = ms;
	blk_integrity_register(disk, &integrity);
1829
	blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
1830 1831
}
#else
1832 1833
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
				u32 max_integrity_segments)
1834 1835 1836 1837
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

1838
static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
1839
{
1840
	struct nvme_ctrl *ctrl = ns->ctrl;
1841
	struct request_queue *queue = disk->queue;
1842 1843
	u32 size = queue_logical_block_size(queue);

1844 1845 1846 1847 1848 1849 1850
	if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) {
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
		return;
	}

	if (ctrl->nr_streams && ns->sws && ns->sgs)
		size *= ns->sws * ns->sgs;
1851

1852 1853 1854
	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
			NVME_DSM_MAX_RANGES);

1855
	queue->limits.discard_alignment = 0;
1856
	queue->limits.discard_granularity = size;
1857

1858 1859 1860 1861
	/* If discard is already enabled, don't reset queue limits */
	if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
		return;

1862 1863
	blk_queue_max_discard_sectors(queue, UINT_MAX);
	blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);
1864 1865

	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
1866
		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
1867 1868
}

1869
static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
1870
{
1871
	u64 max_blocks;
1872

1873 1874
	if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) ||
	    (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886
		return;
	/*
	 * Even though NVMe spec explicitly states that MDTS is not
	 * applicable to the write-zeroes:- "The restriction does not apply to
	 * commands that do not transfer data between the host and the
	 * controller (e.g., Write Uncorrectable ro Write Zeroes command).".
	 * In order to be more cautious use controller's max_hw_sectors value
	 * to configure the maximum sectors for the write-zeroes which is
	 * configured based on the controller's MDTS field in the
	 * nvme_init_identify() if available.
	 */
	if (ns->ctrl->max_hw_sectors == UINT_MAX)
1887
		max_blocks = (u64)USHRT_MAX + 1;
1888
	else
1889
		max_blocks = ns->ctrl->max_hw_sectors + 1;
1890

1891 1892
	blk_queue_max_write_zeroes_sectors(disk->queue,
					   nvme_lba_to_sect(ns, max_blocks));
1893 1894
}

1895
static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
1896
		struct nvme_id_ns *id, struct nvme_ns_ids *ids)
1897
{
1898 1899
	memset(ids, 0, sizeof(*ids));

1900
	if (ctrl->vs >= NVME_VS(1, 1, 0))
1901
		memcpy(ids->eui64, id->eui64, sizeof(id->eui64));
1902
	if (ctrl->vs >= NVME_VS(1, 2, 0))
1903
		memcpy(ids->nguid, id->nguid, sizeof(id->nguid));
1904
	if (ctrl->vs >= NVME_VS(1, 3, 0) || nvme_multi_css(ctrl))
1905 1906
		return nvme_identify_ns_descs(ctrl, nsid, ids);
	return 0;
1907 1908
}

C
Christoph Hellwig 已提交
1909 1910 1911 1912 1913 1914 1915
static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
{
	return !uuid_is_null(&ids->uuid) ||
		memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
		memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
}

1916 1917 1918 1919
static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{
	return uuid_equal(&a->uuid, &b->uuid) &&
		memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
1920 1921
		memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 &&
		a->csi == b->csi;
1922 1923
}

K
Keith Busch 已提交
1924 1925
static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
				 u32 *phys_bs, u32 *io_opt)
1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940
{
	struct streams_directive_params s;
	int ret;

	if (!ctrl->nr_streams)
		return 0;

	ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id);
	if (ret)
		return ret;

	ns->sws = le32_to_cpu(s.sws);
	ns->sgs = le16_to_cpu(s.sgs);

	if (ns->sws) {
K
Keith Busch 已提交
1941
		*phys_bs = ns->sws * (1 << ns->lba_shift);
1942
		if (ns->sgs)
K
Keith Busch 已提交
1943
			*io_opt = *phys_bs * ns->sgs;
1944 1945 1946 1947 1948
	}

	return 0;
}

1949 1950 1951
static void nvme_update_disk_info(struct gendisk *disk,
		struct nvme_ns *ns, struct nvme_id_ns *id)
{
1952
	sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
1953
	unsigned short bs = 1 << ns->lba_shift;
D
Damien Le Moal 已提交
1954
	u32 atomic_bs, phys_bs, io_opt = 0;
1955

1956 1957 1958 1959
	if (ns->lba_shift > PAGE_SHIFT) {
		/* unsupported block size, set capacity to 0 later */
		bs = (1 << 9);
	}
1960 1961 1962
	blk_mq_freeze_queue(disk->queue);
	blk_integrity_unregister(disk);

D
Damien Le Moal 已提交
1963
	atomic_bs = phys_bs = bs;
K
Keith Busch 已提交
1964
	nvme_setup_streams_ns(ns->ctrl, ns, &phys_bs, &io_opt);
1965 1966 1967 1968 1969 1970
	if (id->nabo == 0) {
		/*
		 * Bit 1 indicates whether NAWUPF is defined for this namespace
		 * and whether it should be used instead of AWUPF. If NAWUPF ==
		 * 0 then AWUPF must be used instead.
		 */
1971
		if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
1972 1973 1974 1975
			atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
		else
			atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
	}
K
Keith Busch 已提交
1976

1977
	if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
1978
		/* NPWG = Namespace Preferred Write Granularity */
K
Keith Busch 已提交
1979
		phys_bs = bs * (1 + le16_to_cpu(id->npwg));
1980
		/* NOWS = Namespace Optimal Write Size */
K
Keith Busch 已提交
1981
		io_opt = bs * (1 + le16_to_cpu(id->nows));
1982 1983
	}

1984
	blk_queue_logical_block_size(disk->queue, bs);
1985 1986 1987 1988 1989 1990 1991 1992
	/*
	 * Linux filesystems assume writing a single physical block is
	 * an atomic operation. Hence limit the physical block size to the
	 * value of the Atomic Write Unit Power Fail parameter.
	 */
	blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
	blk_queue_io_min(disk->queue, phys_bs);
	blk_queue_io_opt(disk->queue, io_opt);
1993

1994 1995 1996 1997 1998
	/*
	 * The block layer can't support LBA sizes larger than the page size
	 * yet, so catch this early and don't allow block I/O.
	 */
	if (ns->lba_shift > PAGE_SHIFT)
1999 2000
		capacity = 0;

2001 2002 2003 2004 2005 2006 2007 2008 2009
	/*
	 * Register a metadata profile for PI, or the plain non-integrity NVMe
	 * metadata masquerading as Type 0 if supported, otherwise reject block
	 * I/O to namespaces with metadata except when the namespace supports
	 * PI, as it can strip/insert in that case.
	 */
	if (ns->ms) {
		if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
		    (ns->features & NVME_NS_METADATA_SUPPORTED))
2010 2011
			nvme_init_integrity(disk, ns->ms, ns->pi_type,
					    ns->ctrl->max_integrity_segments);
2012 2013 2014 2015
		else if (!nvme_ns_has_pi(ns))
			capacity = 0;
	}

2016
	set_capacity_revalidate_and_notify(disk, capacity, false);
2017

2018
	nvme_config_discard(disk, ns);
2019
	nvme_config_write_zeroes(disk, ns);
2020

2021
	if (id->nsattr & NVME_NS_ATTR_RO)
2022 2023 2024 2025
		set_disk_ro(disk, true);
	else
		set_disk_ro(disk, false);

2026 2027 2028
	blk_mq_unfreeze_queue(disk->queue);
}

2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065
static inline bool nvme_first_scan(struct gendisk *disk)
{
	/* nvme_alloc_ns() scans the disk prior to adding it */
	return !(disk->flags & GENHD_FL_UP);
}

static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u32 iob;

	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
	    is_power_of_2(ctrl->max_hw_sectors))
		iob = ctrl->max_hw_sectors;
	else
		iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));

	if (!iob)
		return;

	if (!is_power_of_2(iob)) {
		if (nvme_first_scan(ns->disk))
			pr_warn("%s: ignoring unaligned IO boundary:%u\n",
				ns->disk->disk_name, iob);
		return;
	}

	if (blk_queue_is_zoned(ns->disk->queue)) {
		if (nvme_first_scan(ns->disk))
			pr_warn("%s: ignoring zoned namespace IO boundary\n",
				ns->disk->disk_name);
		return;
	}

	blk_queue_chunk_sectors(ns->queue, iob);
}

2066
static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
2067
{
K
Keith Busch 已提交
2068
	unsigned lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
2069
	struct nvme_ns *ns = disk->private_data;
2070
	struct nvme_ctrl *ctrl = ns->ctrl;
K
Keith Busch 已提交
2071
	int ret;
2072 2073 2074 2075 2076

	/*
	 * If identify namespace failed, use default 512 byte block size so
	 * block layer can use before failing read/write for 0 capacity.
	 */
K
Keith Busch 已提交
2077
	ns->lba_shift = id->lbaf[lbaf].ds;
2078 2079
	if (ns->lba_shift == 0)
		ns->lba_shift = 9;
2080

2081 2082 2083
	switch (ns->head->ids.csi) {
	case NVME_CSI_NVM:
		break;
K
Keith Busch 已提交
2084 2085 2086 2087 2088 2089 2090 2091 2092
	case NVME_CSI_ZNS:
		ret = nvme_update_zone_info(disk, ns, lbaf);
		if (ret) {
			dev_warn(ctrl->device,
				"failed to add zoned namespace:%u ret:%d\n",
				ns->head->ns_id, ret);
			return ret;
		}
		break;
2093
	default:
K
Keith Busch 已提交
2094
		dev_warn(ctrl->device, "unknown csi:%u ns:%u\n",
2095 2096 2097 2098
			ns->head->ids.csi, ns->head->ns_id);
		return -ENODEV;
	}

2099
	ns->features = 0;
K
Keith Busch 已提交
2100
	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
2101 2102 2103 2104 2105
	/* the PI implementation requires metadata equal t10 pi tuple size */
	if (ns->ms == sizeof(struct t10_pi_tuple))
		ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
	else
		ns->pi_type = 0;
2106

2107 2108
	if (ns->ms) {
		/*
2109 2110 2111 2112 2113 2114
		 * For PCIe only the separate metadata pointer is supported,
		 * as the block layer supplies metadata in a separate bio_vec
		 * chain. For Fabrics, only metadata as part of extended data
		 * LBA is supported on the wire per the Fabrics specification,
		 * but the HBA/HCA will do the remapping from the separate
		 * metadata buffers for us.
2115
		 */
2116 2117 2118 2119 2120 2121 2122 2123 2124 2125
		if (id->flbas & NVME_NS_FLBAS_META_EXT) {
			ns->features |= NVME_NS_EXT_LBAS;
			if ((ctrl->ops->flags & NVME_F_FABRICS) &&
			    (ctrl->ops->flags & NVME_F_METADATA_SUPPORTED) &&
			    ctrl->max_integrity_segments)
				ns->features |= NVME_NS_METADATA_SUPPORTED;
		} else {
			if (WARN_ON_ONCE(ctrl->ops->flags & NVME_F_FABRICS))
				return -EINVAL;
			if (ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)
2126 2127 2128 2129
				ns->features |= NVME_NS_METADATA_SUPPORTED;
		}
	}

2130
	nvme_set_chunk_sectors(ns, id);
2131
	nvme_update_disk_info(disk, ns, id);
2132
#ifdef CONFIG_NVME_MULTIPATH
2133
	if (ns->head->disk) {
2134
		nvme_update_disk_info(ns->head->disk, ns, id);
2135 2136
		blk_stack_limits(&ns->head->disk->queue->limits,
				 &ns->queue->limits, 0);
2137
		nvme_mpath_update_disk_size(ns->head->disk);
2138
	}
2139
#endif
2140
	return 0;
2141
}
2142

K
Keith Busch 已提交
2143
static int _nvme_revalidate_disk(struct gendisk *disk)
2144 2145
{
	struct nvme_ns *ns = disk->private_data;
2146 2147
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_id_ns *id;
2148
	struct nvme_ns_ids ids;
2149
	int ret = 0;
2150 2151 2152 2153 2154 2155

	if (test_bit(NVME_NS_DEAD, &ns->flags)) {
		set_capacity(disk, 0);
		return -ENODEV;
	}

2156 2157 2158
	ret = nvme_identify_ns(ctrl, ns->head->ns_id, &id);
	if (ret)
		goto out;
2159

2160 2161
	if (id->ncap == 0) {
		ret = -ENODEV;
2162
		goto free_id;
2163
	}
2164

2165 2166 2167 2168
	ret = nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids);
	if (ret)
		goto free_id;

C
Christoph Hellwig 已提交
2169
	if (!nvme_ns_ids_equal(&ns->head->ids, &ids)) {
2170
		dev_err(ctrl->device,
C
Christoph Hellwig 已提交
2171
			"identifiers changed for nsid %d\n", ns->head->ns_id);
2172
		ret = -ENODEV;
2173
		goto free_id;
2174 2175
	}

2176
	ret = __nvme_revalidate_disk(disk, id);
2177
free_id:
2178
	kfree(id);
2179
out:
2180 2181 2182 2183 2184 2185 2186
	/*
	 * Only fail the function if we got a fatal error back from the
	 * device, otherwise ignore the error and just move on.
	 */
	if (ret == -ENOMEM || (ret > 0 && !(ret & NVME_SC_DNR)))
		ret = 0;
	else if (ret > 0)
2187
		ret = blk_status_to_errno(nvme_error_status(ret));
2188
	return ret;
2189 2190
}

K
Keith Busch 已提交
2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212
static int nvme_revalidate_disk(struct gendisk *disk)
{
	int ret;

	ret = _nvme_revalidate_disk(disk);
	if (ret)
		return ret;

#ifdef CONFIG_BLK_DEV_ZONED
	if (blk_queue_is_zoned(disk->queue)) {
		struct nvme_ns *ns = disk->private_data;
		struct nvme_ctrl *ctrl = ns->ctrl;

		ret = blk_revalidate_disk_zones(disk, NULL);
		if (!ret)
			blk_queue_max_zone_append_sectors(disk->queue,
							  ctrl->max_zone_append);
	}
#endif
	return ret;
}

2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 2235
static char nvme_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 1;
	case PR_EXCLUSIVE_ACCESS:
		return 2;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 3;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 4;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 5;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 6;
	default:
		return 0;
	}
};

static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
2236 2237
	struct nvme_ns_head *head = NULL;
	struct nvme_ns *ns;
2238
	struct nvme_command c;
2239
	int srcu_idx, ret;
2240 2241
	u8 data[16] = { 0, };

2242 2243 2244 2245
	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
	if (unlikely(!ns))
		return -EWOULDBLOCK;

2246 2247 2248 2249 2250
	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	memset(&c, 0, sizeof(c));
	c.common.opcode = op;
2251
	c.common.nsid = cpu_to_le32(ns->head->ns_id);
2252
	c.common.cdw10 = cpu_to_le32(cdw10);
2253

2254
	ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
2255 2256
	nvme_put_ns_from_disk(head, srcu_idx);
	return ret;
2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288
}

static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}

static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}

static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
2289
	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
2290 2291 2292 2293 2294
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}

static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
2295
	u32 cdw10 = 1 | (key ? 1 << 3 : 0);
2296 2297 2298 2299 2300
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
}

static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
2301
	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

static const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};

2313
#ifdef CONFIG_BLK_SED_OPAL
2314 2315
int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send)
2316
{
2317
	struct nvme_ctrl *ctrl = data;
2318 2319 2320 2321 2322 2323 2324 2325
	struct nvme_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	if (send)
		cmd.common.opcode = nvme_admin_security_send;
	else
		cmd.common.opcode = nvme_admin_security_recv;
	cmd.common.nsid = 0;
2326 2327
	cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
	cmd.common.cdw11 = cpu_to_le32(len);
2328 2329

	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
2330
				      ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0, false);
2331 2332 2333 2334
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */

2335
static const struct block_device_operations nvme_fops = {
2336 2337
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
2338
	.compat_ioctl	= nvme_compat_ioctl,
2339 2340 2341 2342
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
	.revalidate_disk= nvme_revalidate_disk,
K
Keith Busch 已提交
2343
	.report_zones	= nvme_report_zones,
2344 2345 2346
	.pr_ops		= &nvme_pr_ops,
};

2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363
#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;

	if (!kref_get_unless_zero(&head->ref))
		return -ENXIO;
	return 0;
}

static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
{
	nvme_put_ns_head(disk->private_data);
}

const struct block_device_operations nvme_ns_head_ops = {
	.owner		= THIS_MODULE,
2364
	.submit_bio	= nvme_ns_head_submit_bio,
2365 2366 2367
	.open		= nvme_ns_head_open,
	.release	= nvme_ns_head_release,
	.ioctl		= nvme_ioctl,
2368
	.compat_ioctl	= nvme_compat_ioctl,
2369
	.getgeo		= nvme_getgeo,
K
Keith Busch 已提交
2370
	.report_zones	= nvme_report_zones,
2371 2372 2373 2374
	.pr_ops		= &nvme_pr_ops,
};
#endif /* CONFIG_NVME_MULTIPATH */

2375 2376 2377 2378 2379 2380 2381 2382
static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
	unsigned long timeout =
		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
	int ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
K
Keith Busch 已提交
2383 2384
		if (csts == ~0)
			return -ENODEV;
2385 2386 2387
		if ((csts & NVME_CSTS_RDY) == bit)
			break;

2388
		usleep_range(1000, 2000);
2389 2390 2391
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
2392
			dev_err(ctrl->device,
2393 2394
				"Device not ready; aborting %s, CSTS=0x%x\n",
				enabled ? "initialisation" : "reset", csts);
2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407
			return -ENODEV;
		}
	}

	return ret;
}

/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
2408
int nvme_disable_ctrl(struct nvme_ctrl *ctrl)
2409 2410 2411 2412 2413 2414 2415 2416 2417
{
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config &= ~NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
2418

2419
	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
2420 2421
		msleep(NVME_QUIRK_DELAY_AMOUNT);

2422
	return nvme_wait_ready(ctrl, ctrl->cap, false);
2423
}
2424
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
2425

2426
int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
2427
{
2428
	unsigned dev_page_min;
2429 2430
	int ret;

2431 2432 2433 2434 2435 2436 2437
	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
	if (ret) {
		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
		return ret;
	}
	dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;

2438
	if (NVME_CTRL_PAGE_SHIFT < dev_page_min) {
2439
		dev_err(ctrl->device,
2440
			"Minimum device page size %u too large for host (%u)\n",
2441
			1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT);
2442 2443 2444
		return -ENODEV;
	}

2445 2446 2447 2448
	if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI)
		ctrl->ctrl_config = NVME_CC_CSS_CSI;
	else
		ctrl->ctrl_config = NVME_CC_CSS_NVM;
2449
	ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
2450
	ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
2451 2452 2453 2454 2455 2456
	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
	ctrl->ctrl_config |= NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
2457
	return nvme_wait_ready(ctrl, ctrl->cap, true);
2458
}
2459
EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
2460 2461 2462

int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
{
2463
	unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ);
2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481
	u32 csts;
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
2482
			dev_err(ctrl->device,
2483 2484 2485 2486 2487 2488 2489
				"Device shutdown incomplete; abort shutdown\n");
			return -ENODEV;
		}
	}

	return ret;
}
2490
EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
2491

2492 2493 2494
static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
		struct request_queue *q)
{
2495 2496
	bool vwc = false;

2497
	if (ctrl->max_hw_sectors) {
2498
		u32 max_segments =
2499
			(ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;
2500

2501
		max_segments = min_not_zero(max_segments, ctrl->max_segments);
2502
		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
2503
		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
2504
	}
2505
	blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
K
Keith Busch 已提交
2506
	blk_queue_dma_alignment(q, 7);
2507 2508 2509
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(q, vwc, vwc);
2510 2511
}

2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528
static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
{
	__le64 ts;
	int ret;

	if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
		return 0;

	ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
	ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
			NULL);
	if (ret)
		dev_warn_once(ctrl->device,
			"could not set timestamp (%d)\n", ret);
	return ret;
}

2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548
static int nvme_configure_acre(struct nvme_ctrl *ctrl)
{
	struct nvme_feat_host_behavior *host;
	int ret;

	/* Don't bother enabling the feature if retry delay is not reported */
	if (!ctrl->crdt[0])
		return 0;

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return 0;

	host->acre = NVME_ENABLE_ACRE;
	ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
				host, sizeof(*host), NULL);
	kfree(host);
	return ret;
}

2549
static int nvme_configure_apst(struct nvme_ctrl *ctrl)
2550 2551 2552 2553 2554 2555 2556 2557
{
	/*
	 * APST (Autonomous Power State Transition) lets us program a
	 * table of power state transitions that the controller will
	 * perform automatically.  We configure it with a simple
	 * heuristic: we are willing to spend at most 2% of the time
	 * transitioning between power states.  Therefore, when running
	 * in any given state, we will enter the next lower-power
A
Andy Lutomirski 已提交
2558
	 * non-operational state after waiting 50 * (enlat + exlat)
2559
	 * microseconds, as long as that state's exit latency is under
2560 2561 2562 2563 2564 2565 2566 2567 2568
	 * the requested maximum latency.
	 *
	 * We will not autonomously enter any non-operational state for
	 * which the total latency exceeds ps_max_latency_us.  Users
	 * can set ps_max_latency_us to zero to turn off APST.
	 */

	unsigned apste;
	struct nvme_feat_auto_pst *table;
2569 2570
	u64 max_lat_us = 0;
	int max_ps = -1;
2571 2572 2573 2574 2575 2576 2577
	int ret;

	/*
	 * If APST isn't supported or if we haven't been initialized yet,
	 * then don't do anything.
	 */
	if (!ctrl->apsta)
2578
		return 0;
2579 2580 2581

	if (ctrl->npss > 31) {
		dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
2582
		return 0;
2583 2584 2585 2586
	}

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
2587
		return 0;
2588

2589
	if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
2590 2591
		/* Turn off APST. */
		apste = 0;
2592
		dev_dbg(ctrl->device, "APST disabled\n");
2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603
	} else {
		__le64 target = cpu_to_le64(0);
		int state;

		/*
		 * Walk through all states from lowest- to highest-power.
		 * According to the spec, lower-numbered states use more
		 * power.  NPSS, despite the name, is the index of the
		 * lowest-power state, not the number of states.
		 */
		for (state = (int)ctrl->npss; state >= 0; state--) {
2604
			u64 total_latency_us, exit_latency_us, transition_ms;
2605 2606 2607 2608

			if (target)
				table->entries[state] = target;

2609 2610 2611 2612 2613 2614 2615 2616
			/*
			 * Don't allow transitions to the deepest state
			 * if it's quirked off.
			 */
			if (state == ctrl->npss &&
			    (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
				continue;

2617 2618 2619 2620 2621 2622 2623 2624
			/*
			 * Is this state a useful non-operational state for
			 * higher-power states to autonomously transition to?
			 */
			if (!(ctrl->psd[state].flags &
			      NVME_PS_FLAGS_NON_OP_STATE))
				continue;

2625 2626 2627
			exit_latency_us =
				(u64)le32_to_cpu(ctrl->psd[state].exit_lat);
			if (exit_latency_us > ctrl->ps_max_latency_us)
2628 2629
				continue;

2630 2631 2632 2633
			total_latency_us =
				exit_latency_us +
				le32_to_cpu(ctrl->psd[state].entry_lat);

2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644
			/*
			 * This state is good.  Use it as the APST idle
			 * target for higher power states.
			 */
			transition_ms = total_latency_us + 19;
			do_div(transition_ms, 20);
			if (transition_ms > (1 << 24) - 1)
				transition_ms = (1 << 24) - 1;

			target = cpu_to_le64((state << 3) |
					     (transition_ms << 8));
2645 2646 2647 2648 2649 2650

			if (max_ps == -1)
				max_ps = state;

			if (total_latency_us > max_lat_us)
				max_lat_us = total_latency_us;
2651 2652 2653
		}

		apste = 1;
2654 2655 2656 2657 2658 2659 2660

		if (max_ps == -1) {
			dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
		} else {
			dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
				max_ps, max_lat_us, (int)sizeof(*table), table);
		}
2661 2662 2663 2664 2665 2666 2667 2668
	}

	ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
				table, sizeof(*table), NULL);
	if (ret)
		dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);

	kfree(table);
2669
	return ret;
2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692
}

static void nvme_set_latency_tolerance(struct device *dev, s32 val)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	u64 latency;

	switch (val) {
	case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
	case PM_QOS_LATENCY_ANY:
		latency = U64_MAX;
		break;

	default:
		latency = val;
	}

	if (ctrl->ps_max_latency_us != latency) {
		ctrl->ps_max_latency_us = latency;
		nvme_configure_apst(ctrl);
	}
}

2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705
struct nvme_core_quirk_entry {
	/*
	 * NVMe model and firmware strings are padded with spaces.  For
	 * simplicity, strings in the quirk table are padded with NULLs
	 * instead.
	 */
	u16 vid;
	const char *mn;
	const char *fr;
	unsigned long quirks;
};

static const struct nvme_core_quirk_entry core_quirks[] = {
2706
	{
2707 2708 2709 2710 2711 2712
		/*
		 * This Toshiba device seems to die using any APST states.  See:
		 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
		 */
		.vid = 0x1179,
		.mn = "THNSF5256GPUK TOSHIBA",
2713
		.quirks = NVME_QUIRK_NO_APST,
2714 2715 2716 2717 2718 2719 2720 2721 2722 2723
	},
	{
		/*
		 * This LiteON CL1-3D*-Q11 firmware version has a race
		 * condition associated with actions related to suspend to idle
		 * LiteON has resolved the problem in future firmware
		 */
		.vid = 0x14a4,
		.fr = "22301111",
		.quirks = NVME_QUIRK_SIMPLE_SUSPEND,
2724
	}
2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755
};

/* match is null-terminated but idstr is space-padded. */
static bool string_matches(const char *idstr, const char *match, size_t len)
{
	size_t matchlen;

	if (!match)
		return true;

	matchlen = strlen(match);
	WARN_ON_ONCE(matchlen > len);

	if (memcmp(idstr, match, matchlen))
		return false;

	for (; matchlen < len; matchlen++)
		if (idstr[matchlen] != ' ')
			return false;

	return true;
}

static bool quirk_matches(const struct nvme_id_ctrl *id,
			  const struct nvme_core_quirk_entry *q)
{
	return q->vid == le16_to_cpu(id->vid) &&
		string_matches(id->mn, q->mn, sizeof(id->mn)) &&
		string_matches(id->fr, q->fr, sizeof(id->fr));
}

C
Christoph Hellwig 已提交
2756 2757
static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
2758 2759 2760 2761
{
	size_t nqnlen;
	int off;

2762 2763 2764 2765 2766 2767
	if(!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
		nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
		if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
			strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
			return;
		}
2768

2769 2770 2771
		if (ctrl->vs >= NVME_VS(1, 2, 1))
			dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
	}
2772 2773

	/* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
C
Christoph Hellwig 已提交
2774
	off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
2775
			"nqn.2014.08.org.nvmexpress:%04x%04x",
2776
			le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
C
Christoph Hellwig 已提交
2777
	memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
2778
	off += sizeof(id->sn);
C
Christoph Hellwig 已提交
2779
	memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
2780
	off += sizeof(id->mn);
C
Christoph Hellwig 已提交
2781 2782 2783
	memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
}

2784
static void nvme_release_subsystem(struct device *dev)
C
Christoph Hellwig 已提交
2785
{
2786 2787 2788
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

2789 2790
	if (subsys->instance >= 0)
		ida_simple_remove(&nvme_instance_ida, subsys->instance);
C
Christoph Hellwig 已提交
2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802
	kfree(subsys);
}

static void nvme_destroy_subsystem(struct kref *ref)
{
	struct nvme_subsystem *subsys =
			container_of(ref, struct nvme_subsystem, ref);

	mutex_lock(&nvme_subsystems_lock);
	list_del(&subsys->entry);
	mutex_unlock(&nvme_subsystems_lock);

C
Christoph Hellwig 已提交
2803
	ida_destroy(&subsys->ns_ida);
C
Christoph Hellwig 已提交
2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818
	device_del(&subsys->dev);
	put_device(&subsys->dev);
}

static void nvme_put_subsystem(struct nvme_subsystem *subsys)
{
	kref_put(&subsys->ref, nvme_destroy_subsystem);
}

static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
{
	struct nvme_subsystem *subsys;

	lockdep_assert_held(&nvme_subsystems_lock);

2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829
	/*
	 * Fail matches for discovery subsystems. This results
	 * in each discovery controller bound to a unique subsystem.
	 * This avoids issues with validating controller values
	 * that can only be true when there is a single unique subsystem.
	 * There may be multiple and completely independent entities
	 * that provide discovery controllers.
	 */
	if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME))
		return NULL;

C
Christoph Hellwig 已提交
2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840
	list_for_each_entry(subsys, &nvme_subsystems, entry) {
		if (strcmp(subsys->subnqn, subsysnqn))
			continue;
		if (!kref_get_unless_zero(&subsys->ref))
			continue;
		return subsys;
	}

	return NULL;
}

2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875
#define SUBSYS_ATTR_RO(_name, _mode, _show)			\
	struct device_attribute subsys_attr_##_name = \
		__ATTR(_name, _mode, _show, NULL)

static ssize_t nvme_subsys_show_nqn(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn);
}
static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);

#define nvme_subsys_show_str_function(field)				\
static ssize_t subsys_##field##_show(struct device *dev,		\
			    struct device_attribute *attr, char *buf)	\
{									\
	struct nvme_subsystem *subsys =					\
		container_of(dev, struct nvme_subsystem, dev);		\
	return sprintf(buf, "%.*s\n",					\
		       (int)sizeof(subsys->field), subsys->field);	\
}									\
static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);

nvme_subsys_show_str_function(model);
nvme_subsys_show_str_function(serial);
nvme_subsys_show_str_function(firmware_rev);

static struct attribute *nvme_subsys_attrs[] = {
	&subsys_attr_model.attr,
	&subsys_attr_serial.attr,
	&subsys_attr_firmware_rev.attr,
	&subsys_attr_subsysnqn.attr,
2876 2877 2878
#ifdef CONFIG_NVME_MULTIPATH
	&subsys_attr_iopolicy.attr,
#endif
2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890
	NULL,
};

static struct attribute_group nvme_subsys_attrs_group = {
	.attrs = nvme_subsys_attrs,
};

static const struct attribute_group *nvme_subsys_attrs_groups[] = {
	&nvme_subsys_attrs_group,
	NULL,
};

2891 2892
static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
		struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2893
{
2894
	struct nvme_ctrl *tmp;
2895

2896 2897
	lockdep_assert_held(&nvme_subsystems_lock);

2898
	list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
2899
		if (nvme_state_terminal(tmp))
2900 2901 2902 2903 2904 2905 2906 2907
			continue;

		if (tmp->cntlid == ctrl->cntlid) {
			dev_err(ctrl->device,
				"Duplicate cntlid %u with %s, rejecting\n",
				ctrl->cntlid, dev_name(tmp->device));
			return false;
		}
2908

2909
		if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
2910 2911 2912 2913 2914 2915
		    (ctrl->opts && ctrl->opts->discovery_nqn))
			continue;

		dev_err(ctrl->device,
			"Subsystem does not support multiple controllers\n");
		return false;
2916 2917
	}

2918
	return true;
2919 2920
}

C
Christoph Hellwig 已提交
2921 2922 2923 2924 2925 2926 2927 2928
static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	struct nvme_subsystem *subsys, *found;
	int ret;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return -ENOMEM;
2929 2930

	subsys->instance = -1;
C
Christoph Hellwig 已提交
2931 2932 2933
	mutex_init(&subsys->lock);
	kref_init(&subsys->ref);
	INIT_LIST_HEAD(&subsys->ctrls);
C
Christoph Hellwig 已提交
2934
	INIT_LIST_HEAD(&subsys->nsheads);
C
Christoph Hellwig 已提交
2935 2936 2937 2938 2939 2940
	nvme_init_subnqn(subsys, ctrl, id);
	memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
	memcpy(subsys->model, id->mn, sizeof(subsys->model));
	memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
	subsys->vendor_id = le16_to_cpu(id->vid);
	subsys->cmic = id->cmic;
2941
	subsys->awupf = le16_to_cpu(id->awupf);
2942 2943 2944
#ifdef CONFIG_NVME_MULTIPATH
	subsys->iopolicy = NVME_IOPOLICY_NUMA;
#endif
C
Christoph Hellwig 已提交
2945 2946 2947

	subsys->dev.class = nvme_subsys_class;
	subsys->dev.release = nvme_release_subsystem;
2948
	subsys->dev.groups = nvme_subsys_attrs_groups;
2949
	dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
C
Christoph Hellwig 已提交
2950 2951 2952 2953 2954
	device_initialize(&subsys->dev);

	mutex_lock(&nvme_subsystems_lock);
	found = __nvme_find_get_subsystem(subsys->subnqn);
	if (found) {
2955
		put_device(&subsys->dev);
C
Christoph Hellwig 已提交
2956
		subsys = found;
2957

2958
		if (!nvme_validate_cntlid(subsys, ctrl, id)) {
C
Christoph Hellwig 已提交
2959
			ret = -EINVAL;
2960
			goto out_put_subsystem;
C
Christoph Hellwig 已提交
2961 2962 2963 2964 2965 2966
		}
	} else {
		ret = device_add(&subsys->dev);
		if (ret) {
			dev_err(ctrl->device,
				"failed to register subsystem device.\n");
2967
			put_device(&subsys->dev);
C
Christoph Hellwig 已提交
2968 2969
			goto out_unlock;
		}
C
Christoph Hellwig 已提交
2970
		ida_init(&subsys->ns_ida);
C
Christoph Hellwig 已提交
2971 2972 2973
		list_add_tail(&subsys->entry, &nvme_subsystems);
	}

2974 2975 2976
	ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
				dev_name(ctrl->device));
	if (ret) {
C
Christoph Hellwig 已提交
2977 2978
		dev_err(ctrl->device,
			"failed to create sysfs link from subsystem.\n");
2979
		goto out_put_subsystem;
C
Christoph Hellwig 已提交
2980 2981
	}

2982 2983
	if (!found)
		subsys->instance = ctrl->instance;
2984
	ctrl->subsys = subsys;
C
Christoph Hellwig 已提交
2985
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
2986
	mutex_unlock(&nvme_subsystems_lock);
C
Christoph Hellwig 已提交
2987 2988
	return 0;

2989 2990
out_put_subsystem:
	nvme_put_subsystem(subsys);
C
Christoph Hellwig 已提交
2991 2992 2993
out_unlock:
	mutex_unlock(&nvme_subsystems_lock);
	return ret;
2994 2995
}

2996
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
2997
		void *log, size_t size, u64 offset)
K
Keith Busch 已提交
2998 2999
{
	struct nvme_command c = { };
K
Keith Busch 已提交
3000
	u32 dwlen = nvme_bytes_to_numd(size);
3001 3002

	c.get_log_page.opcode = nvme_admin_get_log_page;
3003
	c.get_log_page.nsid = cpu_to_le32(nsid);
3004
	c.get_log_page.lid = log_page;
3005
	c.get_log_page.lsp = lsp;
3006 3007
	c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
	c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
3008 3009
	c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
	c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
3010
	c.get_log_page.csi = csi;
K
Keith Busch 已提交
3011 3012 3013 3014

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
}

3015
static struct nvme_cel *nvme_find_cel(struct nvme_ctrl *ctrl, u8 csi)
3016
{
3017 3018
	struct nvme_cel *cel, *ret = NULL;

3019
	spin_lock_irq(&ctrl->lock);
3020 3021 3022 3023 3024 3025
	list_for_each_entry(cel, &ctrl->cels, entry) {
		if (cel->csi == csi) {
			ret = cel;
			break;
		}
	}
3026
	spin_unlock_irq(&ctrl->lock);
3027 3028 3029 3030 3031 3032

	return ret;
}

static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
				struct nvme_effects_log **log)
3033
{
3034
	struct nvme_cel *cel = nvme_find_cel(ctrl, csi);
3035 3036
	int ret;

3037 3038
	if (cel)
		goto out;
3039

3040 3041 3042
	cel = kzalloc(sizeof(*cel), GFP_KERNEL);
	if (!cel)
		return -ENOMEM;
3043

3044 3045
	ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0, csi,
			&cel->log, sizeof(cel->log), 0);
3046
	if (ret) {
3047 3048
		kfree(cel);
		return ret;
3049
	}
3050 3051 3052

	cel->csi = csi;

3053
	spin_lock_irq(&ctrl->lock);
3054
	list_add_tail(&cel->entry, &ctrl->cels);
3055
	spin_unlock_irq(&ctrl->lock);
3056 3057 3058
out:
	*log = &cel->log;
	return 0;
3059 3060
}

3061 3062 3063 3064 3065 3066 3067 3068 3069
/*
 * Initialize the cached copies of the Identify data and various controller
 * register in our nvme_ctrl structure.  This should be called as soon as
 * the admin queue is fully up and running.
 */
int nvme_init_identify(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;
	int ret, page_shift;
3070
	u32 max_hw_sectors;
3071
	bool prev_apst_enabled;
3072

3073 3074
	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
	if (ret) {
3075
		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
3076 3077
		return ret;
	}
3078
	page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
3079
	ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
3080

3081
	if (ctrl->vs >= NVME_VS(1, 1, 0))
3082
		ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);
3083

3084 3085
	ret = nvme_identify_ctrl(ctrl, &id);
	if (ret) {
3086
		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
3087 3088 3089
		return -EIO;
	}

3090
	if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
3091
		ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
3092
		if (ret < 0)
3093
			goto out_free;
3094
	}
3095

3096 3097 3098
	if (!(ctrl->ops->flags & NVME_F_FABRICS))
		ctrl->cntlid = le16_to_cpu(id->cntlid);

3099
	if (!ctrl->identified) {
C
Christoph Hellwig 已提交
3100 3101 3102 3103 3104 3105
		int i;

		ret = nvme_init_subsystem(ctrl, id);
		if (ret)
			goto out_free;

3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119
		/*
		 * Check for quirks.  Quirk can depend on firmware version,
		 * so, in principle, the set of quirks present can change
		 * across a reset.  As a possible future enhancement, we
		 * could re-scan for quirks every time we reinitialize
		 * the device, but we'd have to make sure that the driver
		 * behaves intelligently if the quirks change.
		 */
		for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
			if (quirk_matches(id, &core_quirks[i]))
				ctrl->quirks |= core_quirks[i].quirks;
		}
	}

3120
	if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
3121
		dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
3122 3123 3124
		ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
	}

3125 3126 3127 3128
	ctrl->crdt[0] = le16_to_cpu(id->crdt1);
	ctrl->crdt[1] = le16_to_cpu(id->crdt2);
	ctrl->crdt[2] = le16_to_cpu(id->crdt3);

3129
	ctrl->oacs = le16_to_cpu(id->oacs);
3130
	ctrl->oncs = le16_to_cpu(id->oncs);
3131
	ctrl->mtfa = le16_to_cpu(id->mtfa);
3132
	ctrl->oaes = le32_to_cpu(id->oaes);
3133 3134 3135
	ctrl->wctemp = le16_to_cpu(id->wctemp);
	ctrl->cctemp = le16_to_cpu(id->cctemp);

3136
	atomic_set(&ctrl->abort_limit, id->acl + 1);
3137 3138
	ctrl->vwc = id->vwc;
	if (id->mdts)
3139
		max_hw_sectors = 1 << (id->mdts + page_shift - 9);
3140
	else
3141 3142 3143
		max_hw_sectors = UINT_MAX;
	ctrl->max_hw_sectors =
		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
3144

3145
	nvme_set_queue_limits(ctrl, ctrl->admin_q);
3146
	ctrl->sgls = le32_to_cpu(id->sgls);
S
Sagi Grimberg 已提交
3147
	ctrl->kas = le16_to_cpu(id->kas);
C
Christoph Hellwig 已提交
3148
	ctrl->max_namespaces = le32_to_cpu(id->mnan);
S
Sagi Grimberg 已提交
3149
	ctrl->ctratt = le32_to_cpu(id->ctratt);
3150

3151 3152
	if (id->rtd3e) {
		/* us -> s */
3153
		u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC;
3154 3155 3156 3157 3158

		ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
						 shutdown_timeout, 60);

		if (ctrl->shutdown_timeout != shutdown_timeout)
3159
			dev_info(ctrl->device,
3160 3161 3162 3163 3164
				 "Shutdown timeout set to %u seconds\n",
				 ctrl->shutdown_timeout);
	} else
		ctrl->shutdown_timeout = shutdown_timeout;

3165
	ctrl->npss = id->npss;
3166 3167
	ctrl->apsta = id->apsta;
	prev_apst_enabled = ctrl->apst_enabled;
3168 3169
	if (ctrl->quirks & NVME_QUIRK_NO_APST) {
		if (force_apst && id->apsta) {
3170
			dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
3171
			ctrl->apst_enabled = true;
3172
		} else {
3173
			ctrl->apst_enabled = false;
3174 3175
		}
	} else {
3176
		ctrl->apst_enabled = id->apsta;
3177
	}
3178 3179
	memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));

3180
	if (ctrl->ops->flags & NVME_F_FABRICS) {
3181 3182 3183 3184 3185 3186 3187 3188 3189
		ctrl->icdoff = le16_to_cpu(id->icdoff);
		ctrl->ioccsz = le32_to_cpu(id->ioccsz);
		ctrl->iorcsz = le32_to_cpu(id->iorcsz);
		ctrl->maxcmd = le16_to_cpu(id->maxcmd);

		/*
		 * In fabrics we need to verify the cntlid matches the
		 * admin connect
		 */
3190
		if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
3191 3192 3193 3194
			dev_err(ctrl->device,
				"Mismatching cntlid: Connect %u vs Identify "
				"%u, rejecting\n",
				ctrl->cntlid, le16_to_cpu(id->cntlid));
3195
			ret = -EINVAL;
3196 3197
			goto out_free;
		}
S
Sagi Grimberg 已提交
3198 3199

		if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
3200
			dev_err(ctrl->device,
S
Sagi Grimberg 已提交
3201 3202
				"keep-alive support is mandatory for fabrics\n");
			ret = -EINVAL;
3203
			goto out_free;
S
Sagi Grimberg 已提交
3204
		}
3205
	} else {
3206 3207
		ctrl->hmpre = le32_to_cpu(id->hmpre);
		ctrl->hmmin = le32_to_cpu(id->hmmin);
3208 3209
		ctrl->hmminds = le32_to_cpu(id->hmminds);
		ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
3210
	}
3211

C
Christoph Hellwig 已提交
3212
	ret = nvme_mpath_init(ctrl, id);
3213
	kfree(id);
3214

C
Christoph Hellwig 已提交
3215 3216 3217
	if (ret < 0)
		return ret;

3218
	if (ctrl->apst_enabled && !prev_apst_enabled)
3219
		dev_pm_qos_expose_latency_tolerance(ctrl->device);
3220
	else if (!ctrl->apst_enabled && prev_apst_enabled)
3221 3222
		dev_pm_qos_hide_latency_tolerance(ctrl->device);

3223 3224 3225
	ret = nvme_configure_apst(ctrl);
	if (ret < 0)
		return ret;
3226 3227 3228 3229
	
	ret = nvme_configure_timestamp(ctrl);
	if (ret < 0)
		return ret;
3230 3231 3232 3233

	ret = nvme_configure_directives(ctrl);
	if (ret < 0)
		return ret;
3234

3235 3236 3237 3238
	ret = nvme_configure_acre(ctrl);
	if (ret < 0)
		return ret;

K
Keith Busch 已提交
3239 3240 3241 3242 3243
	if (!ctrl->identified) {
		ret = nvme_hwmon_init(ctrl);
		if (ret < 0)
			return ret;
	}
3244

3245
	ctrl->identified = true;
3246

3247 3248 3249 3250
	return 0;

out_free:
	kfree(id);
3251
	return ret;
3252
}
3253
EXPORT_SYMBOL_GPL(nvme_init_identify);
3254

3255
static int nvme_dev_open(struct inode *inode, struct file *file)
3256
{
3257 3258
	struct nvme_ctrl *ctrl =
		container_of(inode->i_cdev, struct nvme_ctrl, cdev);
3259

3260 3261 3262 3263
	switch (ctrl->state) {
	case NVME_CTRL_LIVE:
		break;
	default:
3264
		return -EWOULDBLOCK;
3265 3266
	}

3267 3268 3269 3270
	nvme_get_ctrl(ctrl);
	if (!try_module_get(ctrl->ops->module))
		return -EINVAL;

3271
	file->private_data = ctrl;
3272 3273 3274
	return 0;
}

3275 3276 3277 3278 3279 3280 3281 3282 3283 3284
static int nvme_dev_release(struct inode *inode, struct file *file)
{
	struct nvme_ctrl *ctrl =
		container_of(inode->i_cdev, struct nvme_ctrl, cdev);

	module_put(ctrl->ops->module);
	nvme_put_ctrl(ctrl);
	return 0;
}

3285 3286 3287 3288 3289
static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
	struct nvme_ns *ns;
	int ret;

3290
	down_read(&ctrl->namespaces_rwsem);
3291 3292 3293 3294 3295 3296 3297
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
3298
		dev_warn(ctrl->device,
3299 3300 3301 3302 3303
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

3304
	dev_warn(ctrl->device,
3305 3306
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	kref_get(&ns->kref);
3307
	up_read(&ctrl->namespaces_rwsem);
3308 3309 3310 3311 3312 3313

	ret = nvme_user_cmd(ctrl, ns, argp);
	nvme_put_ns(ns);
	return ret;

out_unlock:
3314
	up_read(&ctrl->namespaces_rwsem);
3315 3316 3317
	return ret;
}

3318 3319 3320 3321 3322 3323 3324 3325 3326
static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct nvme_ctrl *ctrl = file->private_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp);
3327 3328
	case NVME_IOCTL_ADMIN64_CMD:
		return nvme_user_cmd64(ctrl, NULL, argp);
3329
	case NVME_IOCTL_IO_CMD:
3330
		return nvme_dev_user_cmd(ctrl, argp);
3331
	case NVME_IOCTL_RESET:
3332
		dev_warn(ctrl->device, "resetting controller\n");
3333
		return nvme_reset_ctrl_sync(ctrl);
3334 3335
	case NVME_IOCTL_SUBSYS_RESET:
		return nvme_reset_subsystem(ctrl);
K
Keith Busch 已提交
3336 3337 3338
	case NVME_IOCTL_RESCAN:
		nvme_queue_scan(ctrl);
		return 0;
3339 3340 3341 3342 3343 3344 3345 3346
	default:
		return -ENOTTY;
	}
}

static const struct file_operations nvme_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_dev_open,
3347
	.release	= nvme_dev_release,
3348
	.unlocked_ioctl	= nvme_dev_ioctl,
3349
	.compat_ioctl	= compat_ptr_ioctl,
3350 3351 3352 3353 3354 3355 3356 3357 3358
};

static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

3359
	ret = nvme_reset_ctrl_sync(ctrl);
3360 3361 3362
	if (ret < 0)
		return ret;
	return count;
3363
}
3364
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
3365

K
Keith Busch 已提交
3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376
static ssize_t nvme_sysfs_rescan(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	nvme_queue_scan(ctrl);
	return count;
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);

3377 3378 3379 3380 3381 3382 3383 3384 3385 3386
static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (disk->fops == &nvme_fops)
		return nvme_get_ns_from_dev(dev)->head;
	else
		return disk->private_data;
}

3387
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
3388
		char *buf)
3389
{
3390 3391 3392
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	struct nvme_ns_ids *ids = &head->ids;
	struct nvme_subsystem *subsys = head->subsys;
C
Christoph Hellwig 已提交
3393 3394
	int serial_len = sizeof(subsys->serial);
	int model_len = sizeof(subsys->model);
3395

3396 3397
	if (!uuid_is_null(&ids->uuid))
		return sprintf(buf, "uuid.%pU\n", &ids->uuid);
3398

3399 3400
	if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
		return sprintf(buf, "eui.%16phN\n", ids->nguid);
3401

3402 3403
	if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
		return sprintf(buf, "eui.%8phN\n", ids->eui64);
3404

C
Christoph Hellwig 已提交
3405 3406
	while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
				  subsys->serial[serial_len - 1] == '\0'))
3407
		serial_len--;
C
Christoph Hellwig 已提交
3408 3409
	while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
				 subsys->model[model_len - 1] == '\0'))
3410 3411
		model_len--;

C
Christoph Hellwig 已提交
3412 3413
	return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
		serial_len, subsys->serial, model_len, subsys->model,
3414
		head->ns_id);
3415
}
J
Joe Perches 已提交
3416
static DEVICE_ATTR_RO(wwid);
3417

3418
static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
3419
		char *buf)
3420
{
3421
	return sprintf(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
3422
}
J
Joe Perches 已提交
3423
static DEVICE_ATTR_RO(nguid);
3424

3425
static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
3426
		char *buf)
3427
{
3428
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
3429 3430 3431 3432

	/* For backward compatibility expose the NGUID to userspace if
	 * we have no UUID set
	 */
3433
	if (uuid_is_null(&ids->uuid)) {
3434 3435
		printk_ratelimited(KERN_WARNING
				   "No UUID available providing old NGUID\n");
3436
		return sprintf(buf, "%pU\n", ids->nguid);
3437
	}
3438
	return sprintf(buf, "%pU\n", &ids->uuid);
3439
}
J
Joe Perches 已提交
3440
static DEVICE_ATTR_RO(uuid);
3441 3442

static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
3443
		char *buf)
3444
{
3445
	return sprintf(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
3446
}
J
Joe Perches 已提交
3447
static DEVICE_ATTR_RO(eui);
3448 3449

static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
3450
		char *buf)
3451
{
3452
	return sprintf(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
3453
}
J
Joe Perches 已提交
3454
static DEVICE_ATTR_RO(nsid);
3455

3456
static struct attribute *nvme_ns_id_attrs[] = {
3457
	&dev_attr_wwid.attr,
3458
	&dev_attr_uuid.attr,
3459
	&dev_attr_nguid.attr,
3460 3461
	&dev_attr_eui.attr,
	&dev_attr_nsid.attr,
C
Christoph Hellwig 已提交
3462 3463 3464 3465
#ifdef CONFIG_NVME_MULTIPATH
	&dev_attr_ana_grpid.attr,
	&dev_attr_ana_state.attr,
#endif
3466 3467 3468
	NULL,
};

3469
static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
3470 3471 3472
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
3473
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
3474 3475

	if (a == &dev_attr_uuid.attr) {
3476
		if (uuid_is_null(&ids->uuid) &&
3477
		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3478 3479 3480
			return 0;
	}
	if (a == &dev_attr_nguid.attr) {
3481
		if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3482 3483 3484
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
3485
		if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
3486 3487
			return 0;
	}
C
Christoph Hellwig 已提交
3488 3489 3490 3491 3492 3493 3494 3495
#ifdef CONFIG_NVME_MULTIPATH
	if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
		if (dev_to_disk(dev)->fops != &nvme_fops) /* per-path attr */
			return 0;
		if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
			return 0;
	}
#endif
3496 3497 3498
	return a->mode;
}

3499
static const struct attribute_group nvme_ns_id_attr_group = {
3500 3501
	.attrs		= nvme_ns_id_attrs,
	.is_visible	= nvme_ns_id_attrs_are_visible,
3502 3503
};

3504 3505 3506 3507 3508 3509 3510 3511
const struct attribute_group *nvme_ns_id_attr_groups[] = {
	&nvme_ns_id_attr_group,
#ifdef CONFIG_NVM
	&nvme_nvm_attr_group,
#endif
	NULL,
};

M
Ming Lin 已提交
3512
#define nvme_show_str_function(field)						\
3513 3514 3515 3516
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
        struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
C
Christoph Hellwig 已提交
3517 3518
        return sprintf(buf, "%.*s\n",						\
		(int)sizeof(ctrl->subsys->field), ctrl->subsys->field);		\
3519 3520 3521
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

C
Christoph Hellwig 已提交
3522 3523 3524 3525
nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);

M
Ming Lin 已提交
3526 3527 3528 3529 3530 3531 3532 3533 3534 3535
#define nvme_show_int_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
        struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
        return sprintf(buf, "%d\n", ctrl->field);	\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_int_function(cntlid);
3536
nvme_show_int_function(numa_node);
3537 3538
nvme_show_int_function(queue_count);
nvme_show_int_function(sqsize);
3539

M
Ming Lin 已提交
3540 3541 3542 3543 3544 3545 3546
static ssize_t nvme_sysfs_delete(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (device_remove_file_self(dev, attr))
3547
		nvme_delete_ctrl_sync(ctrl);
M
Ming Lin 已提交
3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561
	return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);

static ssize_t nvme_sysfs_show_transport(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);

3562 3563 3564 3565 3566 3567 3568 3569 3570
static ssize_t nvme_sysfs_show_state(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	static const char *const state_name[] = {
		[NVME_CTRL_NEW]		= "new",
		[NVME_CTRL_LIVE]	= "live",
		[NVME_CTRL_RESETTING]	= "resetting",
3571
		[NVME_CTRL_CONNECTING]	= "connecting",
3572
		[NVME_CTRL_DELETING]	= "deleting",
3573
		[NVME_CTRL_DELETING_NOIO]= "deleting (no IO)",
3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585
		[NVME_CTRL_DEAD]	= "dead",
	};

	if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
	    state_name[ctrl->state])
		return sprintf(buf, "%s\n", state_name[ctrl->state]);

	return sprintf(buf, "unknown state\n");
}

static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);

M
Ming Lin 已提交
3586 3587 3588 3589 3590 3591
static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

C
Christoph Hellwig 已提交
3592
	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn);
M
Ming Lin 已提交
3593 3594 3595
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

3596 3597 3598 3599 3600 3601 3602 3603 3604 3605
static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->opts->host->nqn);
}
static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);

3606 3607 3608 3609 3610 3611 3612 3613 3614 3615
static ssize_t nvme_sysfs_show_hostid(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%pU\n", &ctrl->opts->host->id);
}
static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);

M
Ming Lin 已提交
3616 3617 3618 3619 3620 3621 3622 3623 3624 3625
static ssize_t nvme_sysfs_show_address(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);

3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676
static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (ctrl->opts->max_reconnects == -1)
		return sprintf(buf, "off\n");
	return sprintf(buf, "%d\n",
			opts->max_reconnects * opts->reconnect_delay);
}

static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ctrl_loss_tmo, err;

	err = kstrtoint(buf, 10, &ctrl_loss_tmo);
	if (err)
		return -EINVAL;

	else if (ctrl_loss_tmo < 0)
		opts->max_reconnects = -1;
	else
		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
						opts->reconnect_delay);
	return count;
}
static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR,
	nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store);

static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->opts->reconnect_delay == -1)
		return sprintf(buf, "off\n");
	return sprintf(buf, "%d\n", ctrl->opts->reconnect_delay);
}

static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	unsigned int v;
	int err;

	err = kstrtou32(buf, 10, &v);
3677 3678
	if (err)
		return err;
3679 3680 3681 3682 3683 3684 3685

	ctrl->opts->reconnect_delay = v;
	return count;
}
static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR,
	nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store);

3686 3687
static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
K
Keith Busch 已提交
3688
	&dev_attr_rescan_controller.attr,
3689 3690 3691
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
M
Ming Lin 已提交
3692
	&dev_attr_cntlid.attr,
M
Ming Lin 已提交
3693 3694 3695 3696
	&dev_attr_delete_controller.attr,
	&dev_attr_transport.attr,
	&dev_attr_subsysnqn.attr,
	&dev_attr_address.attr,
3697
	&dev_attr_state.attr,
3698
	&dev_attr_numa_node.attr,
3699 3700
	&dev_attr_queue_count.attr,
	&dev_attr_sqsize.attr,
3701
	&dev_attr_hostnqn.attr,
3702
	&dev_attr_hostid.attr,
3703 3704
	&dev_attr_ctrl_loss_tmo.attr,
	&dev_attr_reconnect_delay.attr,
3705 3706 3707
	NULL
};

M
Ming Lin 已提交
3708 3709 3710 3711 3712 3713
static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

3714 3715 3716 3717
	if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
		return 0;
	if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
		return 0;
3718 3719
	if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
		return 0;
3720 3721
	if (a == &dev_attr_hostid.attr && !ctrl->opts)
		return 0;
3722 3723 3724 3725
	if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
		return 0;
M
Ming Lin 已提交
3726 3727 3728 3729

	return a->mode;
}

3730
static struct attribute_group nvme_dev_attrs_group = {
M
Ming Lin 已提交
3731 3732
	.attrs		= nvme_dev_attrs,
	.is_visible	= nvme_dev_attrs_are_visible,
3733 3734 3735 3736 3737 3738 3739
};

static const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
	NULL,
};

3740
static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
C
Christoph Hellwig 已提交
3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771
		unsigned nsid)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);

	list_for_each_entry(h, &subsys->nsheads, entry) {
		if (h->ns_id == nsid && kref_get_unless_zero(&h->ref))
			return h;
	}

	return NULL;
}

static int __nvme_check_ids(struct nvme_subsystem *subsys,
		struct nvme_ns_head *new)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);

	list_for_each_entry(h, &subsys->nsheads, entry) {
		if (nvme_ns_ids_valid(&new->ids) &&
		    nvme_ns_ids_equal(&new->ids, &h->ids))
			return -EINVAL;
	}

	return 0;
}

static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
K
Keith Busch 已提交
3772
		unsigned nsid, struct nvme_ns_ids *ids)
C
Christoph Hellwig 已提交
3773 3774
{
	struct nvme_ns_head *head;
3775
	size_t size = sizeof(*head);
C
Christoph Hellwig 已提交
3776 3777
	int ret = -ENOMEM;

3778 3779 3780 3781 3782
#ifdef CONFIG_NVME_MULTIPATH
	size += num_possible_nodes() * sizeof(struct nvme_ns *);
#endif

	head = kzalloc(size, GFP_KERNEL);
C
Christoph Hellwig 已提交
3783 3784 3785 3786 3787 3788 3789
	if (!head)
		goto out;
	ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL);
	if (ret < 0)
		goto out_free_head;
	head->instance = ret;
	INIT_LIST_HEAD(&head->list);
3790 3791 3792
	ret = init_srcu_struct(&head->srcu);
	if (ret)
		goto out_ida_remove;
C
Christoph Hellwig 已提交
3793 3794
	head->subsys = ctrl->subsys;
	head->ns_id = nsid;
3795
	head->ids = *ids;
C
Christoph Hellwig 已提交
3796 3797 3798 3799 3800 3801 3802 3803 3804
	kref_init(&head->ref);

	ret = __nvme_check_ids(ctrl->subsys, head);
	if (ret) {
		dev_err(ctrl->device,
			"duplicate IDs for nsid %d\n", nsid);
		goto out_cleanup_srcu;
	}

3805 3806 3807 3808 3809 3810 3811
	if (head->ids.csi) {
		ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects);
		if (ret)
			goto out_cleanup_srcu;
	} else
		head->effects = ctrl->effects;

3812 3813 3814 3815
	ret = nvme_mpath_alloc_disk(ctrl, head);
	if (ret)
		goto out_cleanup_srcu;

C
Christoph Hellwig 已提交
3816
	list_add_tail(&head->entry, &ctrl->subsys->nsheads);
3817 3818 3819

	kref_get(&ctrl->subsys->ref);

C
Christoph Hellwig 已提交
3820 3821 3822
	return head;
out_cleanup_srcu:
	cleanup_srcu_struct(&head->srcu);
3823
out_ida_remove:
C
Christoph Hellwig 已提交
3824 3825 3826 3827
	ida_simple_remove(&ctrl->subsys->ns_ida, head->instance);
out_free_head:
	kfree(head);
out:
3828 3829
	if (ret > 0)
		ret = blk_status_to_errno(nvme_error_status(ret));
C
Christoph Hellwig 已提交
3830 3831 3832 3833
	return ERR_PTR(ret);
}

static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
3834
		struct nvme_id_ns *id)
C
Christoph Hellwig 已提交
3835 3836
{
	struct nvme_ctrl *ctrl = ns->ctrl;
3837
	bool is_shared = id->nmic & NVME_NS_NMIC_SHARED;
C
Christoph Hellwig 已提交
3838
	struct nvme_ns_head *head = NULL;
3839
	struct nvme_ns_ids ids;
C
Christoph Hellwig 已提交
3840 3841
	int ret = 0;

3842
	ret = nvme_report_ns_ids(ctrl, nsid, id, &ids);
3843 3844 3845 3846 3847
	if (ret) {
		if (ret < 0)
			return ret;
		return blk_status_to_errno(nvme_error_status(ret));
	}
3848

C
Christoph Hellwig 已提交
3849
	mutex_lock(&ctrl->subsys->lock);
3850
	head = nvme_find_ns_head(ctrl->subsys, nsid);
C
Christoph Hellwig 已提交
3851
	if (!head) {
K
Keith Busch 已提交
3852
		head = nvme_alloc_ns_head(ctrl, nsid, &ids);
C
Christoph Hellwig 已提交
3853 3854 3855 3856
		if (IS_ERR(head)) {
			ret = PTR_ERR(head);
			goto out_unlock;
		}
3857
		head->shared = is_shared;
C
Christoph Hellwig 已提交
3858
	} else {
3859
		ret = -EINVAL;
3860
		if (!is_shared || !head->shared) {
3861
			dev_err(ctrl->device,
3862 3863
				"Duplicate unshared namespace %d\n", nsid);
			goto out_put_ns_head;
3864
		}
C
Christoph Hellwig 已提交
3865 3866 3867 3868
		if (!nvme_ns_ids_equal(&head->ids, &ids)) {
			dev_err(ctrl->device,
				"IDs don't match for shared namespace %d\n",
					nsid);
3869
			goto out_put_ns_head;
C
Christoph Hellwig 已提交
3870 3871 3872 3873 3874
		}
	}

	list_add_tail(&ns->siblings, &head->list);
	ns->head = head;
3875 3876
	mutex_unlock(&ctrl->subsys->lock);
	return 0;
C
Christoph Hellwig 已提交
3877

3878 3879
out_put_ns_head:
	nvme_put_ns_head(head);
C
Christoph Hellwig 已提交
3880 3881 3882 3883 3884
out_unlock:
	mutex_unlock(&ctrl->subsys->lock);
	return ret;
}

3885 3886 3887 3888 3889
static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
	struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);

C
Christoph Hellwig 已提交
3890
	return nsa->head->ns_id - nsb->head->ns_id;
3891 3892
}

3893
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3894
{
3895
	struct nvme_ns *ns, *ret = NULL;
3896

3897
	down_read(&ctrl->namespaces_rwsem);
3898
	list_for_each_entry(ns, &ctrl->namespaces, list) {
C
Christoph Hellwig 已提交
3899
		if (ns->head->ns_id == nsid) {
3900 3901
			if (!kref_get_unless_zero(&ns->kref))
				continue;
3902 3903 3904
			ret = ns;
			break;
		}
C
Christoph Hellwig 已提交
3905
		if (ns->head->ns_id > nsid)
3906 3907
			break;
	}
3908
	up_read(&ctrl->namespaces_rwsem);
3909
	return ret;
3910
}
3911
EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);
3912

3913
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3914 3915 3916
{
	struct nvme_ns *ns;
	struct gendisk *disk;
3917 3918
	struct nvme_id_ns *id;
	char disk_name[DISK_NAME_LEN];
3919
	int node = ctrl->numa_node, flags = GENHD_FL_EXT_DEVT, ret;
3920 3921 3922

	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
	if (!ns)
3923
		return;
3924 3925

	ns->queue = blk_mq_init_queue(ctrl->tagset);
3926
	if (IS_ERR(ns->queue))
C
Christoph Hellwig 已提交
3927
		goto out_free_ns;
3928

3929
	if (ctrl->opts && ctrl->opts->data_digest)
3930 3931 3932
		ns->queue->backing_dev_info->capabilities
			|= BDI_CAP_STABLE_WRITES;

3933
	blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
3934 3935 3936
	if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
		blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);

3937 3938 3939 3940 3941 3942 3943
	ns->queue->queuedata = ns;
	ns->ctrl = ctrl;

	kref_init(&ns->kref);
	ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */

	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
3944
	nvme_set_queue_limits(ctrl, ns->queue);
3945

3946 3947
	ret = nvme_identify_ns(ctrl, nsid, &id);
	if (ret)
3948 3949
		goto out_free_queue;

3950
	if (id->ncap == 0)	/* no namespace (legacy quirk) */
3951 3952
		goto out_free_id;

3953 3954
	ret = nvme_init_ns_head(ns, nsid, id);
	if (ret)
C
Christoph Hellwig 已提交
3955
		goto out_free_id;
3956
	nvme_set_disk_name(disk_name, ns, ctrl, &flags);
3957

3958
	disk = alloc_disk_node(0, node);
3959
	if (!disk)
C
Christoph Hellwig 已提交
3960
		goto out_unlink_ns;
3961

3962 3963 3964
	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
3965
	disk->flags = flags;
3966 3967 3968
	memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
	ns->disk = disk;

3969
	if (__nvme_revalidate_disk(disk, id))
3970
		goto out_put_disk;
3971

3972
	if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
3973 3974
		ret = nvme_nvm_register(ns, disk_name, node);
		if (ret) {
3975 3976 3977 3978 3979
			dev_warn(ctrl->device, "LightNVM init failure\n");
			goto out_put_disk;
		}
	}

3980
	down_write(&ctrl->namespaces_rwsem);
3981
	list_add_tail(&ns->list, &ctrl->namespaces);
3982
	up_write(&ctrl->namespaces_rwsem);
3983

3984
	nvme_get_ctrl(ctrl);
3985

3986
	device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);
3987

C
Christoph Hellwig 已提交
3988
	nvme_mpath_add_disk(ns, id);
3989
	nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
C
Christoph Hellwig 已提交
3990 3991
	kfree(id);

3992
	return;
3993
 out_put_disk:
3994 3995
	/* prevent double queue cleanup */
	ns->disk->queue = NULL;
3996
	put_disk(ns->disk);
C
Christoph Hellwig 已提交
3997 3998 3999
 out_unlink_ns:
	mutex_lock(&ctrl->subsys->lock);
	list_del_rcu(&ns->siblings);
4000 4001
	if (list_empty(&ns->head->list))
		list_del_init(&ns->head->entry);
C
Christoph Hellwig 已提交
4002
	mutex_unlock(&ctrl->subsys->lock);
4003
	nvme_put_ns_head(ns->head);
4004 4005
 out_free_id:
	kfree(id);
4006 4007 4008 4009 4010 4011 4012 4013
 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_free_ns:
	kfree(ns);
}

static void nvme_ns_remove(struct nvme_ns *ns)
{
4014 4015
	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
		return;
4016

4017
	nvme_fault_inject_fini(&ns->fault_inject);
4018 4019 4020

	mutex_lock(&ns->ctrl->subsys->lock);
	list_del_rcu(&ns->siblings);
4021 4022
	if (list_empty(&ns->head->list))
		list_del_init(&ns->head->entry);
4023
	mutex_unlock(&ns->ctrl->subsys->lock);
4024

4025 4026 4027 4028
	synchronize_rcu(); /* guarantee not available in head->list */
	nvme_mpath_clear_current_path(ns);
	synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */

C
Christoph Hellwig 已提交
4029
	if (ns->disk->flags & GENHD_FL_UP) {
4030 4031
		del_gendisk(ns->disk);
		blk_cleanup_queue(ns->queue);
4032 4033
		if (blk_get_integrity(ns->disk))
			blk_integrity_unregister(ns->disk);
4034
	}
4035

4036
	down_write(&ns->ctrl->namespaces_rwsem);
4037
	list_del_init(&ns->list);
4038
	up_write(&ns->ctrl->namespaces_rwsem);
4039

4040
	nvme_mpath_check_last_path(ns);
4041 4042 4043
	nvme_put_ns(ns);
}

4044 4045 4046 4047 4048 4049 4050 4051 4052 4053
static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
{
	struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid);

	if (ns) {
		nvme_ns_remove(ns);
		nvme_put_ns(ns);
	}
}

4054 4055 4056 4057
static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;

4058
	ns = nvme_find_get_ns(ctrl, nsid);
4059
	if (ns) {
C
Christoph Hellwig 已提交
4060
		if (revalidate_disk(ns->disk))
4061
			nvme_ns_remove(ns);
4062
		nvme_put_ns(ns);
4063 4064 4065 4066
	} else
		nvme_alloc_ns(ctrl, nsid);
}

4067 4068 4069 4070
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					unsigned nsid)
{
	struct nvme_ns *ns, *next;
4071
	LIST_HEAD(rm_list);
4072

4073
	down_write(&ctrl->namespaces_rwsem);
4074
	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
4075
		if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
4076
			list_move_tail(&ns->list, &rm_list);
4077
	}
4078
	up_write(&ctrl->namespaces_rwsem);
4079 4080 4081 4082

	list_for_each_entry_safe(ns, next, &rm_list, list)
		nvme_ns_remove(ns);

4083 4084
}

4085
static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
4086
{
4087
	const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32);
4088
	__le32 *ns_list;
4089 4090
	u32 prev = 0;
	int ret = 0, i;
4091

4092 4093
	if (nvme_ctrl_limited_cns(ctrl))
		return -EOPNOTSUPP;
4094

4095
	ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
4096 4097 4098
	if (!ns_list)
		return -ENOMEM;

4099
	for (;;) {
4100 4101
		ret = nvme_identify_ns_list(ctrl, prev, ns_list);
		if (ret)
4102
			goto free;
4103

4104
		for (i = 0; i < nr_entries; i++) {
4105
			u32 nsid = le32_to_cpu(ns_list[i]);
4106

4107 4108
			if (!nsid)	/* end of the list? */
				goto out;
4109
			nvme_validate_ns(ctrl, nsid);
4110 4111
			while (++prev < nsid)
				nvme_ns_remove_by_nsid(ctrl, prev);
4112 4113 4114
		}
	}
 out:
4115 4116
	nvme_remove_invalid_namespaces(ctrl, prev);
 free:
4117 4118 4119 4120
	kfree(ns_list);
	return ret;
}

4121
static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
4122
{
4123 4124 4125 4126 4127 4128 4129
	struct nvme_id_ctrl *id;
	u32 nn, i;

	if (nvme_identify_ctrl(ctrl, &id))
		return;
	nn = le32_to_cpu(id->nn);
	kfree(id);
4130

4131 4132 4133
	for (i = 1; i <= nn; i++)
		nvme_validate_ns(ctrl, i);

4134
	nvme_remove_invalid_namespaces(ctrl, nn);
4135 4136
}

4137
static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
4138 4139 4140
{
	size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
	__le32 *log;
4141
	int error;
4142 4143 4144

	log = kzalloc(log_size, GFP_KERNEL);
	if (!log)
4145
		return;
4146

4147 4148 4149 4150 4151 4152
	/*
	 * We need to read the log to clear the AEN, but we don't want to rely
	 * on it for the changed namespace information as userspace could have
	 * raced with us in reading the log page, which could cause us to miss
	 * updates.
	 */
4153 4154
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0,
			NVME_CSI_NVM, log, log_size, 0);
4155
	if (error)
4156 4157 4158 4159 4160 4161
		dev_warn(ctrl->device,
			"reading changed ns log failed: %d\n", error);

	kfree(log);
}

4162
static void nvme_scan_work(struct work_struct *work)
4163
{
4164 4165
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, scan_work);
4166

K
Keith Busch 已提交
4167 4168
	/* No tagset on a live ctrl means IO queues could not created */
	if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
4169 4170
		return;

D
Dan Carpenter 已提交
4171
	if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
4172
		dev_info(ctrl->device, "rescanning namespaces.\n");
4173
		nvme_clear_changed_ns_log(ctrl);
4174 4175
	}

4176
	mutex_lock(&ctrl->scan_lock);
4177 4178
	if (nvme_scan_ns_list(ctrl) != 0)
		nvme_scan_ns_sequential(ctrl);
4179
	mutex_unlock(&ctrl->scan_lock);
4180

4181
	down_write(&ctrl->namespaces_rwsem);
4182
	list_sort(NULL, &ctrl->namespaces, ns_cmp);
4183
	up_write(&ctrl->namespaces_rwsem);
4184
}
4185

4186 4187 4188 4189 4190
/*
 * This function iterates the namespace list unlocked to allow recovery from
 * controller failure. It is up to the caller to ensure the namespace list is
 * not modified by scan work while this function is executing.
 */
4191 4192 4193
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns, *next;
4194
	LIST_HEAD(ns_list);
4195

4196 4197 4198 4199 4200 4201 4202
	/*
	 * make sure to requeue I/O to all namespaces as these
	 * might result from the scan itself and must complete
	 * for the scan_work to make progress
	 */
	nvme_mpath_clear_ctrl_paths(ctrl);

4203 4204 4205
	/* prevent racing with ns scanning */
	flush_work(&ctrl->scan_work);

4206 4207 4208 4209 4210 4211 4212 4213 4214
	/*
	 * The dead states indicates the controller was not gracefully
	 * disconnected. In that case, we won't be able to flush any data while
	 * removing the namespaces' disks; fail all the queues now to avoid
	 * potentially having to clean up the failed sync later.
	 */
	if (ctrl->state == NVME_CTRL_DEAD)
		nvme_kill_queues(ctrl);

4215 4216 4217
	/* this is a no-op when called from the controller reset handler */
	nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);

4218
	down_write(&ctrl->namespaces_rwsem);
4219
	list_splice_init(&ctrl->namespaces, &ns_list);
4220
	up_write(&ctrl->namespaces_rwsem);
4221 4222

	list_for_each_entry_safe(ns, next, &ns_list, list)
4223 4224
		nvme_ns_remove(ns);
}
4225
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
4226

4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253
static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct nvme_ctrl *ctrl =
		container_of(dev, struct nvme_ctrl, ctrl_device);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ret;

	ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name);
	if (ret)
		return ret;

	if (opts) {
		ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr);
		if (ret)
			return ret;

		ret = add_uevent_var(env, "NVME_TRSVCID=%s",
				opts->trsvcid ?: "none");
		if (ret)
			return ret;

		ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s",
				opts->host_traddr ?: "none");
	}
	return ret;
}

4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264 4265 4266 4267 4268 4269
static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
{
	char *envp[2] = { NULL, NULL };
	u32 aen_result = ctrl->aen_result;

	ctrl->aen_result = 0;
	if (!aen_result)
		return;

	envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
	if (!envp[0])
		return;
	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
	kfree(envp[0]);
}

4270 4271 4272 4273 4274
static void nvme_async_event_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, async_event_work);

4275
	nvme_aen_uevent(ctrl);
4276
	ctrl->ops->submit_async_event(ctrl);
4277 4278
}

4279 4280 4281 4282 4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293 4294 4295 4296 4297 4298 4299 4300
static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
{

	u32 csts;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
		return false;

	if (csts == ~0)
		return false;

	return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
}

static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
{
	struct nvme_fw_slot_info_log *log;

	log = kmalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return;

4301 4302
	if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM,
			log, sizeof(*log), 0))
4303
		dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
4304 4305 4306 4307 4308 4309 4310 4311 4312 4313 4314 4315 4316 4317 4318 4319 4320 4321 4322 4323 4324
	kfree(log);
}

static void nvme_fw_act_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work,
				struct nvme_ctrl, fw_act_work);
	unsigned long fw_act_timeout;

	if (ctrl->mtfa)
		fw_act_timeout = jiffies +
				msecs_to_jiffies(ctrl->mtfa * 100);
	else
		fw_act_timeout = jiffies +
				msecs_to_jiffies(admin_timeout * 1000);

	nvme_stop_queues(ctrl);
	while (nvme_ctrl_pp_status(ctrl)) {
		if (time_after(jiffies, fw_act_timeout)) {
			dev_warn(ctrl->device,
				"Fw activation timeout, reset controller\n");
4325 4326
			nvme_try_sched_reset(ctrl);
			return;
4327 4328 4329 4330
		}
		msleep(100);
	}

4331
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
4332 4333 4334
		return;

	nvme_start_queues(ctrl);
4335
	/* read FW slot information to clear the AER */
4336 4337 4338
	nvme_get_fw_slot_info(ctrl);
}

4339 4340
static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
4341 4342
	u32 aer_notice_type = (result & 0xff00) >> 8;

4343 4344
	trace_nvme_async_event(ctrl, aer_notice_type);

4345
	switch (aer_notice_type) {
4346
	case NVME_AER_NOTICE_NS_CHANGED:
D
Dan Carpenter 已提交
4347
		set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
4348 4349 4350
		nvme_queue_scan(ctrl);
		break;
	case NVME_AER_NOTICE_FW_ACT_STARTING:
4351 4352 4353 4354 4355 4356 4357
		/*
		 * We are (ab)using the RESETTING state to prevent subsequent
		 * recovery actions from interfering with the controller's
		 * firmware activation.
		 */
		if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
			queue_work(nvme_wq, &ctrl->fw_act_work);
4358
		break;
C
Christoph Hellwig 已提交
4359 4360 4361 4362 4363 4364 4365
#ifdef CONFIG_NVME_MULTIPATH
	case NVME_AER_NOTICE_ANA:
		if (!ctrl->ana_log_buf)
			break;
		queue_work(nvme_wq, &ctrl->ana_work);
		break;
#endif
4366 4367 4368
	case NVME_AER_NOTICE_DISC_CHANGED:
		ctrl->aen_result = result;
		break;
4369 4370 4371 4372 4373
	default:
		dev_warn(ctrl->device, "async event result %08x\n", result);
	}
}

4374
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
4375
		volatile union nvme_result *res)
4376
{
4377
	u32 result = le32_to_cpu(res->u32);
4378
	u32 aer_type = result & 0x07;
4379

4380
	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
4381 4382
		return;

4383
	switch (aer_type) {
4384 4385 4386
	case NVME_AER_NOTICE:
		nvme_handle_aen_notice(ctrl, result);
		break;
4387 4388 4389 4390
	case NVME_AER_ERROR:
	case NVME_AER_SMART:
	case NVME_AER_CSS:
	case NVME_AER_VS:
4391
		trace_nvme_async_event(ctrl, aer_type);
4392
		ctrl->aen_result = result;
4393 4394 4395
		break;
	default:
		break;
4396
	}
4397
	queue_work(nvme_wq, &ctrl->async_event_work);
4398 4399
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);
4400

4401
void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
4402
{
C
Christoph Hellwig 已提交
4403
	nvme_mpath_stop(ctrl);
4404
	nvme_stop_keep_alive(ctrl);
4405
	flush_work(&ctrl->async_event_work);
4406
	cancel_work_sync(&ctrl->fw_act_work);
4407 4408 4409 4410 4411
}
EXPORT_SYMBOL_GPL(nvme_stop_ctrl);

void nvme_start_ctrl(struct nvme_ctrl *ctrl)
{
4412
	nvme_start_keep_alive(ctrl);
4413

4414 4415
	nvme_enable_aen(ctrl);

4416 4417 4418 4419 4420 4421
	if (ctrl->queue_count > 1) {
		nvme_queue_scan(ctrl);
		nvme_start_queues(ctrl);
	}
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);
4422

4423 4424
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
4425
	nvme_fault_inject_fini(&ctrl->fault_inject);
4426
	dev_pm_qos_hide_latency_tolerance(ctrl->device);
4427
	cdev_device_del(&ctrl->cdev, ctrl->device);
4428
	nvme_put_ctrl(ctrl);
4429
}
4430
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
4431

4432
static void nvme_free_ctrl(struct device *dev)
4433
{
4434 4435
	struct nvme_ctrl *ctrl =
		container_of(dev, struct nvme_ctrl, ctrl_device);
C
Christoph Hellwig 已提交
4436
	struct nvme_subsystem *subsys = ctrl->subsys;
4437
	struct nvme_cel *cel, *next;
4438

K
Keith Busch 已提交
4439
	if (!subsys || ctrl->instance != subsys->instance)
4440 4441
		ida_simple_remove(&nvme_instance_ida, ctrl->instance);

4442 4443 4444 4445 4446
	list_for_each_entry_safe(cel, next, &ctrl->cels, entry) {
		list_del(&cel->entry);
		kfree(cel);
	}

C
Christoph Hellwig 已提交
4447
	nvme_mpath_uninit(ctrl);
S
Sagi Grimberg 已提交
4448
	__free_page(ctrl->discard_page);
4449

C
Christoph Hellwig 已提交
4450
	if (subsys) {
4451
		mutex_lock(&nvme_subsystems_lock);
C
Christoph Hellwig 已提交
4452 4453
		list_del(&ctrl->subsys_entry);
		sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
4454
		mutex_unlock(&nvme_subsystems_lock);
C
Christoph Hellwig 已提交
4455
	}
4456 4457 4458

	ctrl->ops->free_ctrl(ctrl);

C
Christoph Hellwig 已提交
4459 4460
	if (subsys)
		nvme_put_subsystem(subsys);
4461 4462 4463 4464 4465 4466 4467 4468 4469 4470 4471 4472
}

/*
 * Initialize a NVMe controller structures.  This needs to be called during
 * earliest initialization so that we have the initialized structured around
 * during probing.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

4473 4474
	ctrl->state = NVME_CTRL_NEW;
	spin_lock_init(&ctrl->lock);
4475
	mutex_init(&ctrl->scan_lock);
4476
	INIT_LIST_HEAD(&ctrl->namespaces);
4477
	INIT_LIST_HEAD(&ctrl->cels);
4478
	init_rwsem(&ctrl->namespaces_rwsem);
4479 4480 4481
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;
4482
	ctrl->numa_node = NUMA_NO_NODE;
4483
	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
4484
	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
4485
	INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
4486
	INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
4487
	init_waitqueue_head(&ctrl->state_wq);
4488

4489 4490 4491 4492
	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;

4493 4494 4495 4496 4497 4498 4499 4500
	BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
			PAGE_SIZE);
	ctrl->discard_page = alloc_page(GFP_KERNEL);
	if (!ctrl->discard_page) {
		ret = -ENOMEM;
		goto out;
	}

4501 4502
	ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
	if (ret < 0)
4503
		goto out;
4504
	ctrl->instance = ret;
4505

4506 4507
	device_initialize(&ctrl->ctrl_device);
	ctrl->device = &ctrl->ctrl_device;
4508
	ctrl->device->devt = MKDEV(MAJOR(nvme_chr_devt), ctrl->instance);
4509 4510 4511 4512 4513 4514 4515
	ctrl->device->class = nvme_class;
	ctrl->device->parent = ctrl->dev;
	ctrl->device->groups = nvme_dev_attr_groups;
	ctrl->device->release = nvme_free_ctrl;
	dev_set_drvdata(ctrl->device, ctrl);
	ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
	if (ret)
4516 4517
		goto out_release_instance;

4518
	nvme_get_ctrl(ctrl);
4519 4520 4521
	cdev_init(&ctrl->cdev, &nvme_dev_fops);
	ctrl->cdev.owner = ops->module;
	ret = cdev_device_add(&ctrl->cdev, ctrl->device);
4522 4523
	if (ret)
		goto out_free_name;
4524

4525 4526 4527 4528 4529 4530 4531 4532
	/*
	 * Initialize latency tolerance controls.  The sysfs files won't
	 * be visible to userspace unless the device actually supports APST.
	 */
	ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
		min(default_ps_max_latency_us, (unsigned long)S32_MAX));

4533 4534
	nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));

4535
	return 0;
4536
out_free_name:
4537
	nvme_put_ctrl(ctrl);
4538
	kfree_const(ctrl->device->kobj.name);
4539
out_release_instance:
4540
	ida_simple_remove(&nvme_instance_ida, ctrl->instance);
4541
out:
4542 4543
	if (ctrl->discard_page)
		__free_page(ctrl->discard_page);
4544 4545
	return ret;
}
4546
EXPORT_SYMBOL_GPL(nvme_init_ctrl);
4547

4548 4549 4550 4551 4552 4553 4554 4555 4556 4557 4558
/**
 * nvme_kill_queues(): Ends all namespace queues
 * @ctrl: the dead controller that needs to end
 *
 * Call this function when the driver determines it is unable to get the
 * controller in a state capable of servicing IO.
 */
void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

4559
	down_read(&ctrl->namespaces_rwsem);
M
Ming Lei 已提交
4560

4561
	/* Forcibly unquiesce queues to avoid blocking dispatch */
I
Igor Konopko 已提交
4562
	if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
4563
		blk_mq_unquiesce_queue(ctrl->admin_q);
4564

4565 4566
	list_for_each_entry(ns, &ctrl->namespaces, list)
		nvme_set_queue_dying(ns);
4567

4568
	up_read(&ctrl->namespaces_rwsem);
4569
}
4570
EXPORT_SYMBOL_GPL(nvme_kill_queues);
4571

K
Keith Busch 已提交
4572 4573 4574 4575
void nvme_unfreeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

4576
	down_read(&ctrl->namespaces_rwsem);
K
Keith Busch 已提交
4577 4578
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unfreeze_queue(ns->queue);
4579
	up_read(&ctrl->namespaces_rwsem);
K
Keith Busch 已提交
4580 4581 4582
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);

4583
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
K
Keith Busch 已提交
4584 4585 4586
{
	struct nvme_ns *ns;

4587
	down_read(&ctrl->namespaces_rwsem);
K
Keith Busch 已提交
4588 4589 4590 4591 4592
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
		if (timeout <= 0)
			break;
	}
4593
	up_read(&ctrl->namespaces_rwsem);
4594
	return timeout;
K
Keith Busch 已提交
4595 4596 4597 4598 4599 4600 4601
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);

void nvme_wait_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

4602
	down_read(&ctrl->namespaces_rwsem);
K
Keith Busch 已提交
4603 4604
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_freeze_queue_wait(ns->queue);
4605
	up_read(&ctrl->namespaces_rwsem);
K
Keith Busch 已提交
4606 4607 4608 4609 4610 4611 4612
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze);

void nvme_start_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

4613
	down_read(&ctrl->namespaces_rwsem);
K
Keith Busch 已提交
4614
	list_for_each_entry(ns, &ctrl->namespaces, list)
4615
		blk_freeze_queue_start(ns->queue);
4616
	up_read(&ctrl->namespaces_rwsem);
K
Keith Busch 已提交
4617 4618 4619
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);

4620
void nvme_stop_queues(struct nvme_ctrl *ctrl)
4621 4622 4623
{
	struct nvme_ns *ns;

4624
	down_read(&ctrl->namespaces_rwsem);
4625
	list_for_each_entry(ns, &ctrl->namespaces, list)
4626
		blk_mq_quiesce_queue(ns->queue);
4627
	up_read(&ctrl->namespaces_rwsem);
4628
}
4629
EXPORT_SYMBOL_GPL(nvme_stop_queues);
4630

4631
void nvme_start_queues(struct nvme_ctrl *ctrl)
4632 4633 4634
{
	struct nvme_ns *ns;

4635
	down_read(&ctrl->namespaces_rwsem);
4636
	list_for_each_entry(ns, &ctrl->namespaces, list)
4637
		blk_mq_unquiesce_queue(ns->queue);
4638
	up_read(&ctrl->namespaces_rwsem);
4639
}
4640
EXPORT_SYMBOL_GPL(nvme_start_queues);
4641

K
Keith Busch 已提交
4642 4643 4644 4645 4646 4647 4648 4649 4650

void nvme_sync_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_sync_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
4651 4652 4653

	if (ctrl->admin_q)
		blk_sync_queue(ctrl->admin_q);
K
Keith Busch 已提交
4654 4655 4656
}
EXPORT_SYMBOL_GPL(nvme_sync_queues);

4657 4658 4659 4660 4661 4662 4663 4664 4665 4666 4667 4668 4669 4670 4671 4672 4673 4674 4675 4676 4677 4678 4679
struct nvme_ctrl *nvme_ctrl_get_by_path(const char *path)
{
	struct nvme_ctrl *ctrl;
	struct file *f;

	f = filp_open(path, O_RDWR, 0);
	if (IS_ERR(f))
		return ERR_CAST(f);

	if (f->f_op != &nvme_dev_fops) {
		ctrl = ERR_PTR(-EINVAL);
		goto out_close;
	}

	ctrl = f->private_data;
	nvme_get_ctrl(ctrl);

out_close:
	filp_close(f, NULL);
	return ctrl;
}
EXPORT_SYMBOL_NS_GPL(nvme_ctrl_get_by_path, NVME_TARGET_PASSTHRU);

4680 4681 4682 4683 4684 4685 4686 4687 4688 4689 4690 4691 4692 4693 4694 4695 4696 4697
/*
 * Check we didn't inadvertently grow the command structure sizes:
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
K
Keith Busch 已提交
4698 4699
	BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE);
4700 4701 4702 4703 4704 4705 4706
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
}


4707
static int __init nvme_core_init(void)
4708
{
4709
	int result = -ENOMEM;
4710

4711 4712
	_nvme_check_size();

4713 4714 4715
	nvme_wq = alloc_workqueue("nvme-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_wq)
4716 4717 4718 4719 4720 4721 4722 4723 4724 4725 4726
		goto out;

	nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_reset_wq)
		goto destroy_wq;

	nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_delete_wq)
		goto destroy_reset_wq;
4727

4728
	result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme");
4729
	if (result < 0)
4730
		goto destroy_delete_wq;
4731 4732 4733 4734 4735 4736

	nvme_class = class_create(THIS_MODULE, "nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}
4737
	nvme_class->dev_uevent = nvme_class_uevent;
4738

C
Christoph Hellwig 已提交
4739 4740 4741 4742 4743
	nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem");
	if (IS_ERR(nvme_subsys_class)) {
		result = PTR_ERR(nvme_subsys_class);
		goto destroy_class;
	}
4744
	return 0;
4745

C
Christoph Hellwig 已提交
4746 4747
destroy_class:
	class_destroy(nvme_class);
4748
unregister_chrdev:
4749
	unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
4750 4751 4752 4753
destroy_delete_wq:
	destroy_workqueue(nvme_delete_wq);
destroy_reset_wq:
	destroy_workqueue(nvme_reset_wq);
4754 4755
destroy_wq:
	destroy_workqueue(nvme_wq);
4756
out:
4757
	return result;
4758 4759
}

4760
static void __exit nvme_core_exit(void)
4761
{
C
Christoph Hellwig 已提交
4762
	class_destroy(nvme_subsys_class);
4763
	class_destroy(nvme_class);
4764
	unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
4765 4766
	destroy_workqueue(nvme_delete_wq);
	destroy_workqueue(nvme_reset_wq);
4767
	destroy_workqueue(nvme_wq);
M
Max Gurtovoy 已提交
4768
	ida_destroy(&nvme_instance_ida);
4769
}
4770 4771 4772 4773 4774

MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_core_init);
module_exit(nvme_core_exit);