/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/t10-pi.h>
#include <linux/pm_qos.h>
#include <asm/unaligned.h>

#include "nvme.h"
#include "fabrics.h"

#define NVME_MINORS		(1U << MINORBITS)

unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned int nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static int nvme_char_major;
module_param(nvme_char_major, int, 0);

static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

static bool streams;
module_param(streams, bool, 0644);
MODULE_PARM_DESC(streams, "turn on support for Streams write directives");

struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

static LIST_HEAD(nvme_ctrl_list);
static DEFINE_SPINLOCK(dev_list_lock);

static struct class *nvme_class;

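/*
 * Build the Get Log Page CDW10 value: the zero's based dword count
 * (NUMD) goes in bits 31:16 and the log page identifier in bits 7:0.
 */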
static __le32 nvme_get_log_dw10(u8 lid, size_t size)
{
	return cpu_to_le32((((size / 4) - 1) << 16) | lid);
}

int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
	if (!queue_work(nvme_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);

static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_reset_ctrl(ctrl);
	if (!ret)
		flush_work(&ctrl->reset_work);
	return ret;
}

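/*
 * Map the NVMe status of a completed request to a block layer status.
 * The DNR and more bits are masked off before the lookup.
 */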
static blk_status_t nvme_error_status(struct request *req)
{
	switch (nvme_req(req)->status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return BLK_STS_OK;
	case NVME_SC_CAP_EXCEEDED:
		return BLK_STS_NOSPC;
	case NVME_SC_ONCS_NOT_SUPPORTED:
		return BLK_STS_NOTSUPP;
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
	case NVME_SC_ACCESS_DENIED:
	case NVME_SC_READ_ONLY:
		return BLK_STS_MEDIUM;
	case NVME_SC_GUARD_CHECK:
	case NVME_SC_APPTAG_CHECK:
	case NVME_SC_REFTAG_CHECK:
	case NVME_SC_INVALID_PI:
		return BLK_STS_PROTECTION;
	case NVME_SC_RESERVATION_CONFLICT:
		return BLK_STS_NEXUS;
	default:
		return BLK_STS_IOERR;
	}
}

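/*
 * A failed command is retried unless the request is marked no-retry,
 * the controller set the Do Not Retry bit, or the retry count has
 * reached nvme_max_retries.
 */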
static inline bool nvme_req_needs_retry(struct request *req)
{
	if (blk_noretry_request(req))
		return false;
	if (nvme_req(req)->status & NVME_SC_DNR)
		return false;
	if (nvme_req(req)->retries >= nvme_max_retries)
		return false;
	return true;
}

void nvme_complete_rq(struct request *req)
{
	if (unlikely(nvme_req(req)->status && nvme_req_needs_retry(req))) {
		nvme_req(req)->retries++;
		blk_mq_requeue_request(req, true);
		return;
	}

	blk_mq_end_request(req, nvme_error_status(req));
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);

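/*
 * Tag iterator callback used when tearing down a controller: complete
 * every started request with an Abort status, additionally setting DNR
 * if the queue is already dying so the command is not retried.
 */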
void nvme_cancel_request(struct request *req, void *data, bool reserved)
{
	int status;

	if (!blk_mq_request_started(req))
		return;

	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	status = NVME_SC_ABORT_REQ;
	if (blk_queue_dying(req->q))
		status |= NVME_SC_DNR;
	nvme_req(req)->status = status;
	blk_mq_complete_request(req);

}
EXPORT_SYMBOL_GPL(nvme_cancel_request);

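/*
 * Controller state machine: only the transitions enumerated below are
 * legal.  Returns true if the controller actually moved to new_state.
 */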
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state;
	unsigned long flags;
	bool changed = false;

	spin_lock_irqsave(&ctrl->lock, flags);

	old_state = ctrl->state;
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_RECONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_RECONNECTING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_RECONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (changed)
		ctrl->state = new_state;

	spin_unlock_irqrestore(&ctrl->lock, flags);

	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	if (ns->ndev)
		nvme_nvm_unregister(ns);

	if (ns->disk) {
		spin_lock(&dev_list_lock);
		ns->disk->private_data = NULL;
		spin_unlock(&dev_list_lock);
	}

	put_disk(ns->disk);
	ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

static void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}

static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
{
	struct nvme_ns *ns;

	spin_lock(&dev_list_lock);
	ns = disk->private_data;
	if (ns) {
		if (!kref_get_unless_zero(&ns->kref))
			goto fail;
		if (!try_module_get(ns->ctrl->ops->module))
			goto fail_put_ns;
	}
	spin_unlock(&dev_list_lock);

	return ns;

fail_put_ns:
	kref_put(&ns->kref, nvme_free_ns);
fail:
	spin_unlock(&dev_list_lock);
	return NULL;
}

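/*
 * Allocate a request for the given NVMe command.  NVME_QID_ANY lets
 * blk-mq pick any queue; otherwise the request is bound to the hardware
 * context of the given queue id.
 */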
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags, int qid)
{
	unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
	struct request *req;

	if (qid == NVME_QID_ANY) {
		req = blk_mq_alloc_request(q, op, flags);
	} else {
		req = blk_mq_alloc_request_hctx(q, op, flags,
				qid ? qid - 1 : 0);
	}
	if (IS_ERR(req))
		return req;

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	nvme_req(req)->cmd = cmd;

	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);

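/*
 * Enable or disable the Streams directive for the whole controller via
 * a Directive Send / Identify (Enable Directive) operation.
 */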
static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));

	c.directive.opcode = nvme_admin_directive_send;
	c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
	c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
	c.directive.dtype = NVME_DIR_IDENTIFY;
	c.directive.tdtype = NVME_DIR_STREAMS;
	c.directive.endir = enable ? NVME_DIR_ENDIR : 0;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
}

static int nvme_disable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, false);
}

static int nvme_enable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, true);
}

static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
				  struct streams_directive_params *s, u32 nsid)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	memset(s, 0, sizeof(*s));

	c.directive.opcode = nvme_admin_directive_recv;
	c.directive.nsid = cpu_to_le32(nsid);
	c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1);
	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
	c.directive.dtype = NVME_DIR_STREAMS;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
}

static int nvme_configure_directives(struct nvme_ctrl *ctrl)
{
	struct streams_directive_params s;
	int ret;

	if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
		return 0;
	if (!streams)
		return 0;

	ret = nvme_enable_streams(ctrl);
	if (ret)
		return ret;

	ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
	if (ret)
		return ret;

	ctrl->nssa = le16_to_cpu(s.nssa);
	if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
		dev_info(ctrl->device, "too few streams (%u) available\n",
					ctrl->nssa);
		nvme_disable_streams(ctrl);
		return 0;
	}

	ctrl->nr_streams = min_t(unsigned, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
	dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
	return 0;
}

/*
 * Check if 'req' has a write hint associated with it. If it does, assign
 * a valid namespace stream to the write.
 */
static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
				     struct request *req, u16 *control,
				     u32 *dsmgmt)
{
	enum rw_hint streamid = req->write_hint;

	if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
		streamid = 0;
	else {
		streamid--;
		if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
			return;

		*control |= NVME_RW_DTYPE_STREAMS;
		*dsmgmt |= streamid << 16;
	}

	if (streamid < ARRAY_SIZE(req->q->write_hints))
		req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
}

static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
}

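/*
 * Translate a discard request into a DSM (Dataset Management)
 * deallocate command, building one range descriptor per bio and
 * attaching the array as the request's special payload.
 */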
static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
	struct nvme_dsm_range *range;
	struct bio *bio;

	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
	if (!range)
		return BLK_STS_RESOURCE;

	__rq_for_each_bio(bio, req) {
		u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

		range[n].cattr = cpu_to_le32(0);
		range[n].nlb = cpu_to_le32(nlb);
		range[n].slba = cpu_to_le64(slba);
		n++;
	}

	if (WARN_ON_ONCE(n != segments)) {
		kfree(range);
		return BLK_STS_IOERR;
	}

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = sizeof(*range) * segments;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return BLK_STS_OK;
}

static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u16 control = 0;
	u32 dsmgmt = 0;

	/*
	 * If formatted with metadata, require the block layer to provide a
	 * buffer unless this namespace is formatted such that the metadata
	 * can be stripped/generated by the controller with PRACT=1.
	 */
	if (ns && ns->ms &&
	    (!ns->pi_type || ns->ms != sizeof(struct t10_pi_tuple)) &&
	    !blk_integrity_rq(req) && !blk_rq_is_passthrough(req))
		return BLK_STS_NOTSUPP;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
		nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);

	if (ns->ms) {
		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			cmnd->rw.reftag = cpu_to_le32(
					nvme_block_nr(ns, blk_rq_pos(req)));
			break;
		}
		if (!blk_integrity_rq(req))
			control |= NVME_RW_PRINFO_PRACT;
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
	return BLK_STS_OK;
}

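/*
 * Central translation point for the transport drivers: turn a block
 * layer request into the NVMe command to be sent to the controller.
 */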
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd)
{
	blk_status_t ret = BLK_STS_OK;

	if (!(req->rq_flags & RQF_DONTPREP)) {
		nvme_req(req)->retries = 0;
		nvme_req(req)->flags = 0;
		req->rq_flags |= RQF_DONTPREP;
	}

	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
		memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
		break;
	case REQ_OP_FLUSH:
		nvme_setup_flush(ns, cmd);
		break;
	case REQ_OP_WRITE_ZEROES:
		/* currently only aliased to deallocate for a few ctrls: */
	case REQ_OP_DISCARD:
		ret = nvme_setup_discard(ns, req, cmd);
		break;
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		ret = nvme_setup_rw(ns, req, cmd);
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	cmd->common.command_id = req->tag;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head, int flags)
{
	struct request *req;
	int ret;

	req = nvme_alloc_request(q, cmd, flags, qid);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	blk_execute_rq(req->q, NULL, req, at_head);
	if (result)
		*result = nvme_req(req)->result;
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
 out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
			NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);

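/*
 * Bounce user space metadata into a kernel buffer and attach it to the
 * bio as an integrity payload; the buffer is returned so the caller can
 * copy it back to user space (for reads) and free it.
 */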
static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
		unsigned len, u32 seed, bool write)
{
	struct bio_integrity_payload *bip;
	int ret = -ENOMEM;
	void *buf;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		goto out;

	ret = -EFAULT;
	if (write && copy_from_user(buf, ubuf, len))
		goto out_free_meta;

	bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
	if (IS_ERR(bip)) {
		ret = PTR_ERR(bip);
		goto out_free_meta;
	}

	bip->bip_iter.bi_size = len;
	bip->bip_iter.bi_sector = seed;
	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
			offset_in_page(buf));
	if (ret == len)
		return buf;
	ret = -ENOMEM;
out_free_meta:
	kfree(buf);
out:
	return ERR_PTR(ret);
}

static int nvme_submit_user_cmd(struct request_queue *q,
		struct nvme_command *cmd, void __user *ubuffer,
		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
		u32 meta_seed, u32 *result, unsigned timeout)
{
	bool write = nvme_is_write(cmd);
	struct nvme_ns *ns = q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;
		bio->bi_disk = disk;
		if (disk && meta_buffer && meta_len) {
			meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
					meta_seed, write);
			if (IS_ERR(meta)) {
				ret = PTR_ERR(meta);
				goto out_unmap;
			}
		}
	}

	blk_execute_rq(req->q, disk, req, 0);
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
	if (result)
		*result = le32_to_cpu(nvme_req(req)->result.u32);
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
	kfree(meta);
 out_unmap:
	if (bio)
		blk_rq_unmap_user(bio);
 out:
	blk_mq_free_request(req);
	return ret;
}

static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
{
	struct nvme_ctrl *ctrl = rq->end_io_data;

	blk_mq_free_request(rq);

	if (status) {
		dev_err(ctrl->device,
			"failed nvme_keep_alive_end_io error=%d\n",
				status);
		return;
	}

	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static int nvme_keep_alive(struct nvme_ctrl *ctrl)
{
	struct nvme_command c;
	struct request *rq;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_keep_alive;

	rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED,
			NVME_QID_ANY);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->timeout = ctrl->kato * HZ;
	rq->end_io_data = ctrl;

	blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);

	return 0;
}

static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);

	if (nvme_keep_alive(ctrl)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed\n");
		nvme_reset_ctrl(ctrl);
		return;
	}
}

void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
EXPORT_SYMBOL_GPL(nvme_start_keep_alive);

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);

static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CTRL;

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}

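/*
 * Walk the NVMe 1.3 Namespace Identification Descriptor list (CNS 03h)
 * and extract the EUI-64, NGUID and UUID identifiers.
 */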
static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
		u8 *eui64, u8 *nguid, uuid_t *uuid)
{
	struct nvme_command c = { };
	int status;
	void *data;
	int pos;
	int len;

	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
				      NVME_IDENTIFY_DATA_SIZE);
	if (status)
		goto free_data;

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;

		switch (cur->nidt) {
		case NVME_NIDT_EUI64:
			if (cur->nidl != NVME_NIDT_EUI64_LEN) {
				dev_warn(ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_EUI64\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_EUI64_LEN;
			memcpy(eui64, data + pos + sizeof(*cur), len);
			break;
		case NVME_NIDT_NGUID:
			if (cur->nidl != NVME_NIDT_NGUID_LEN) {
				dev_warn(ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_NGUID\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_NGUID_LEN;
			memcpy(nguid, data + pos + sizeof(*cur), len);
			break;
		case NVME_NIDT_UUID:
			if (cur->nidl != NVME_NIDT_UUID_LEN) {
				dev_warn(ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_UUID\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_UUID_LEN;
			uuid_copy(uuid, data + pos + sizeof(*cur));
			break;
		default:
			/* Skip unknown types */
			len = cur->nidl;
			break;
		}

		len += sizeof(*cur);
	}
free_data:
	kfree(data);
	return status;
}

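/* Identify with CNS 02h: the list of active namespace IDs above nsid. */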
static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
{
	struct nvme_command c = { };

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
	c.identify.nsid = cpu_to_le32(nsid);
	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
}

static struct nvme_id_ns *nvme_identify_ns(struct nvme_ctrl *ctrl,
		unsigned nsid)
{
	struct nvme_id_ns *id;
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS;

	id = kmalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return NULL;

	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
	if (error) {
		dev_warn(ctrl->device, "Identify namespace failed\n");
		kfree(id);
		return NULL;
	}

	return id;
}

static int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
		      void *buffer, size_t buflen, u32 *result)
{
	struct nvme_command c;
	union nvme_result res;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
			buffer, buflen, 0, NVME_QID_ANY, 0, 0);
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);
	return ret;
}

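/*
 * Request *count I/O queues with the Number of Queues feature.  On
 * return *count holds the number of queues actually granted, which may
 * be smaller than requested.
 */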
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
			&result);
	if (status < 0)
		return status;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count.  We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be only way to fix them up.
	 */
	if (status > 0) {
		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);

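/*
 * Handle the NVME_IOCTL_SUBMIT_IO ioctl: build a read/write/compare
 * command from the user's nvme_user_io descriptor and execute it
 * synchronously on the namespace queue.
 */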
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	if (io.flags)
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;
	meta_len = (io.nblocks + 1) * ns->ms;
	metadata = (void __user *)(uintptr_t)io.metadata;

	if (ns->ext) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return nvme_submit_user_cmd(ns->queue, &c,
			(void __user *)(uintptr_t)io.addr, length,
			metadata, meta_len, io.slba, NULL, 0);
}

static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
			(void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
			0, &cmd.result, timeout);
	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
	default:
#ifdef CONFIG_NVM
		if (ns->ndev)
			return nvme_nvm_ioctl(ns, cmd, arg);
#endif
		if (is_sed_ioctl(cmd))
			return sed_ioctl(ns->ctrl->opal_dev, cmd,
					 (void __user *) arg);
		return -ENOTTY;
	}
}

static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	return nvme_get_ns_from_disk(bdev->bd_disk) ? 0 : -ENXIO;
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	struct nvme_ns *ns = disk->private_data;

	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
}

static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_prep_integrity(struct gendisk *disk, struct nvme_id_ns *id,
		u16 bs)
{
	struct nvme_ns *ns = disk->private_data;
	u16 old_ms = ns->ms;
	u8 pi_type = 0;

	ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);

	/* PI implementation requires metadata equal to the t10 pi tuple size */
	if (ns->ms == sizeof(struct t10_pi_tuple))
		pi_type = id->dps & NVME_NS_DPS_PI_MASK;

	if (blk_get_integrity(disk) &&
	    (ns->pi_type != pi_type || ns->ms != old_ms ||
	     bs != queue_logical_block_size(disk->queue) ||
	     (ns->ms && ns->ext)))
		blk_integrity_unregister(disk);

	ns->pi_type = pi_type;
}

static void nvme_init_integrity(struct nvme_ns *ns)
{
	struct blk_integrity integrity;

	memset(&integrity, 0, sizeof(integrity));
	switch (ns->pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
		integrity.tag_size = sizeof(u16) + sizeof(u32);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
		integrity.tag_size = sizeof(u16);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	default:
		integrity.profile = NULL;
		break;
	}
	integrity.tuple_size = ns->ms;
	blk_integrity_register(ns->disk, &integrity);
	blk_queue_max_integrity_segments(ns->queue, 1);
}
#else
static void nvme_prep_integrity(struct gendisk *disk, struct nvme_id_ns *id,
		u16 bs)
{
}
static void nvme_init_integrity(struct nvme_ns *ns)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

static void nvme_set_chunk_size(struct nvme_ns *ns)
{
	u32 chunk_size = (((u32)ns->noiob) << (ns->lba_shift - 9));
	blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
}

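/*
 * Derive the queue's discard limits from DSM deallocate support; when
 * streams are enabled the granularity is aligned to the stream write
 * size so deallocations stay stream-aligned.
 */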
static void nvme_config_discard(struct nvme_ns *ns)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u32 logical_block_size = queue_logical_block_size(ns->queue);

	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
			NVME_DSM_MAX_RANGES);

	if (ctrl->nr_streams && ns->sws && ns->sgs) {
		unsigned int sz = logical_block_size * ns->sws * ns->sgs;

		ns->queue->limits.discard_alignment = sz;
		ns->queue->limits.discard_granularity = sz;
	} else {
		ns->queue->limits.discard_alignment = logical_block_size;
		ns->queue->limits.discard_granularity = logical_block_size;
	}
	blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
	blk_queue_max_discard_segments(ns->queue, NVME_DSM_MAX_RANGES);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);

	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		blk_queue_max_write_zeroes_sectors(ns->queue, UINT_MAX);
}

static void nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
		struct nvme_id_ns *id, u8 *eui64, u8 *nguid, uuid_t *uuid)
{
	if (ctrl->vs >= NVME_VS(1, 1, 0))
		memcpy(eui64, id->eui64, sizeof(id->eui64));
	if (ctrl->vs >= NVME_VS(1, 2, 0))
		memcpy(nguid, id->nguid, sizeof(id->nguid));
	if (ctrl->vs >= NVME_VS(1, 3, 0)) {
		/*
		 * Don't treat an error as fatal, as we may already have
		 * an NGUID or EUI-64.
		 */
		if (nvme_identify_ns_descs(ctrl, nsid, eui64, nguid, uuid))
			dev_warn(ctrl->device,
				 "%s: Identify Descriptors failed\n", __func__);
	}
}

static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
{
	struct nvme_ns *ns = disk->private_data;
	struct nvme_ctrl *ctrl = ns->ctrl;
	u16 bs;

	/*
	 * If Identify Namespace failed, use a default 512 byte block size so
	 * the block layer can use the disk before failing reads/writes on the
	 * zero-capacity device.
	 */
	ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
	if (ns->lba_shift == 0)
		ns->lba_shift = 9;
	bs = 1 << ns->lba_shift;
	ns->noiob = le16_to_cpu(id->noiob);

	blk_mq_freeze_queue(disk->queue);

	if (ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)
		nvme_prep_integrity(disk, id, bs);
	blk_queue_logical_block_size(ns->queue, bs);
	if (ns->noiob)
		nvme_set_chunk_size(ns);
	if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
		nvme_init_integrity(ns);
	if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
		set_capacity(disk, 0);
	else
		set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	if (ctrl->oncs & NVME_CTRL_ONCS_DSM)
		nvme_config_discard(ns);
	blk_mq_unfreeze_queue(disk->queue);
}

static int nvme_revalidate_disk(struct gendisk *disk)
{
	struct nvme_ns *ns = disk->private_data;
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_id_ns *id;
	u8 eui64[8] = { 0 }, nguid[16] = { 0 };
	uuid_t uuid = uuid_null;
	int ret = 0;

	if (test_bit(NVME_NS_DEAD, &ns->flags)) {
		set_capacity(disk, 0);
		return -ENODEV;
	}

	id = nvme_identify_ns(ctrl, ns->ns_id);
	if (!id)
		return -ENODEV;

	if (id->ncap == 0) {
		ret = -ENODEV;
		goto out;
	}

	nvme_report_ns_ids(ctrl, ns->ns_id, id, eui64, nguid, &uuid);
	if (!uuid_equal(&ns->uuid, &uuid) ||
	    memcmp(&ns->nguid, &nguid, sizeof(ns->nguid)) ||
	    memcmp(&ns->eui, &eui64, sizeof(ns->eui))) {
		dev_err(ctrl->device,
			"identifiers changed for nsid %d\n", ns->ns_id);
		ret = -ENODEV;
	}

out:
	kfree(id);
	return ret;
}

static char nvme_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 1;
	case PR_EXCLUSIVE_ACCESS:
		return 2;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 3;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 4;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 5;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 6;
	default:
		return 0;
	}
}

static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;
	struct nvme_command c;
	u8 data[16] = { 0, };

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	memset(&c, 0, sizeof(c));
	c.common.opcode = op;
	c.common.nsid = cpu_to_le32(ns->ns_id);
	c.common.cdw10[0] = cpu_to_le32(cdw10);

	return nvme_submit_sync_cmd(ns->queue, &c, data, 16);
}

static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}

static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}

static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}

static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	u32 cdw10 = 1 | (key ? 1 << 3 : 0);
	/* a clear is a Reservation Release action with RRELA = 001b */
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

static const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};

#ifdef CONFIG_BLK_SED_OPAL
int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send)
{
	struct nvme_ctrl *ctrl = data;
	struct nvme_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	if (send)
		cmd.common.opcode = nvme_admin_security_send;
	else
		cmd.common.opcode = nvme_admin_security_recv;
	cmd.common.nsid = 0;
	cmd.common.cdw10[0] = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
	cmd.common.cdw10[1] = cpu_to_le32(len);

	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
				      ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
	.revalidate_disk = nvme_revalidate_disk,
	.pr_ops		= &nvme_pr_ops,
};

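/*
 * Poll CSTS.RDY until it matches the expected value, giving up after
 * the timeout advertised in CAP.TO (in units of 500 ms).
 */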
static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
	unsigned long timeout =
		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
	int ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if (csts == ~0)
			return -ENODEV;
		if ((csts & NVME_CSTS_RDY) == bit)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device not ready; aborting %s\n", enabled ?
						"initialisation" : "reset");
			return -ENODEV;
		}
	}

	return ret;
}

/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config &= ~NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
		msleep(NVME_QUIRK_DELAY_AMOUNT);

	return nvme_wait_ready(ctrl, cap, false);
}
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);

int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	/*
	 * Default to a 4K page size, with the intention to update this
	 * path in the future to accommodate architectures with differing
	 * kernel and IO page sizes.
	 */
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12;
	int ret;

	if (page_shift < dev_page_min) {
		dev_err(ctrl->device,
			"Minimum device page size %u too large for host (%u)\n",
			1 << dev_page_min, 1 << page_shift);
		return -ENODEV;
	}

	ctrl->page_size = 1 << page_shift;

	ctrl->ctrl_config = NVME_CC_CSS_NVM;
	ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
	ctrl->ctrl_config |= NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
	return nvme_wait_ready(ctrl, cap, true);
}
EXPORT_SYMBOL_GPL(nvme_enable_ctrl);

int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
{
	unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ);
	u32 csts;
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device shutdown incomplete; abort shutdown\n");
			return -ENODEV;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);

static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
		struct request_queue *q)
{
	bool vwc = false;

	if (ctrl->max_hw_sectors) {
		u32 max_segments =
			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;

		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
	}
	if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
		blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
	blk_queue_virt_boundary(q, ctrl->page_size - 1);
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(q, vwc, vwc);
}

static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
{
	__le64 ts;
	int ret;

	if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
		return 0;

	ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
	ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
			NULL);
	if (ret)
		dev_warn_once(ctrl->device,
			"could not set timestamp (%d)\n", ret);
	return ret;
}

static int nvme_configure_apst(struct nvme_ctrl *ctrl)
{
	/*
	 * APST (Autonomous Power State Transition) lets us program a
	 * table of power state transitions that the controller will
	 * perform automatically.  We configure it with a simple
	 * heuristic: we are willing to spend at most 2% of the time
	 * transitioning between power states.  Therefore, when running
	 * in any given state, we will enter the next lower-power
	 * non-operational state after waiting 50 * (enlat + exlat)
	 * microseconds, as long as that state's exit latency is under
	 * the requested maximum latency.
	 *
	 * We will not autonomously enter any non-operational state for
	 * which the total latency exceeds ps_max_latency_us.  Users
	 * can set ps_max_latency_us to zero to turn off APST.
	 */

	unsigned apste;
	struct nvme_feat_auto_pst *table;
	u64 max_lat_us = 0;
	int max_ps = -1;
	int ret;

	/*
	 * If APST isn't supported or if we haven't been initialized yet,
	 * then don't do anything.
	 */
	if (!ctrl->apsta)
		return 0;

	if (ctrl->npss > 31) {
		dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
		return 0;
	}

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return 0;

	if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
		/* Turn off APST. */
		apste = 0;
		dev_dbg(ctrl->device, "APST disabled\n");
	} else {
		__le64 target = cpu_to_le64(0);
		int state;

		/*
		 * Walk through all states from lowest- to highest-power.
		 * According to the spec, lower-numbered states use more
		 * power.  NPSS, despite the name, is the index of the
		 * lowest-power state, not the number of states.
		 */
		for (state = (int)ctrl->npss; state >= 0; state--) {
			u64 total_latency_us, exit_latency_us, transition_ms;

			if (target)
				table->entries[state] = target;

			/*
			 * Don't allow transitions to the deepest state
			 * if it's quirked off.
			 */
			if (state == ctrl->npss &&
			    (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
				continue;

			/*
			 * Is this state a useful non-operational state for
			 * higher-power states to autonomously transition to?
			 */
			if (!(ctrl->psd[state].flags &
			      NVME_PS_FLAGS_NON_OP_STATE))
				continue;

			exit_latency_us =
				(u64)le32_to_cpu(ctrl->psd[state].exit_lat);
			if (exit_latency_us > ctrl->ps_max_latency_us)
				continue;

			total_latency_us =
				exit_latency_us +
				le32_to_cpu(ctrl->psd[state].entry_lat);

			/*
			 * This state is good.  Use it as the APST idle
			 * target for higher power states.
			 */
			transition_ms = total_latency_us + 19;
			do_div(transition_ms, 20);
			if (transition_ms > (1 << 24) - 1)
				transition_ms = (1 << 24) - 1;

			target = cpu_to_le64((state << 3) |
					     (transition_ms << 8));

			if (max_ps == -1)
				max_ps = state;

			if (total_latency_us > max_lat_us)
				max_lat_us = total_latency_us;
		}

		apste = 1;

		if (max_ps == -1) {
			dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
		} else {
			dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
				max_ps, max_lat_us, (int)sizeof(*table), table);
		}
	}

	ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
				table, sizeof(*table), NULL);
	if (ret)
		dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);

	kfree(table);
	return ret;
}

static void nvme_set_latency_tolerance(struct device *dev, s32 val)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	u64 latency;

	switch (val) {
	case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
	case PM_QOS_LATENCY_ANY:
		latency = U64_MAX;
		break;

	default:
		latency = val;
	}

	if (ctrl->ps_max_latency_us != latency) {
		ctrl->ps_max_latency_us = latency;
		nvme_configure_apst(ctrl);
	}
}

struct nvme_core_quirk_entry {
	/*
	 * NVMe model and firmware strings are padded with spaces.  For
	 * simplicity, strings in the quirk table are padded with NULLs
	 * instead.
	 */
	u16 vid;
	const char *mn;
	const char *fr;
	unsigned long quirks;
};

static const struct nvme_core_quirk_entry core_quirks[] = {
	{
		/*
		 * This Toshiba device seems to die using any APST states.  See:
		 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
		 */
		.vid = 0x1179,
		.mn = "THNSF5256GPUK TOSHIBA",
		.quirks = NVME_QUIRK_NO_APST,
	}
};

/* match is null-terminated but idstr is space-padded. */
static bool string_matches(const char *idstr, const char *match, size_t len)
{
	size_t matchlen;

	if (!match)
		return true;

	matchlen = strlen(match);
	WARN_ON_ONCE(matchlen > len);

	if (memcmp(idstr, match, matchlen))
		return false;

	for (; matchlen < len; matchlen++)
		if (idstr[matchlen] != ' ')
			return false;

	return true;
}

static bool quirk_matches(const struct nvme_id_ctrl *id,
			  const struct nvme_core_quirk_entry *q)
{
	return q->vid == le16_to_cpu(id->vid) &&
		string_matches(id->mn, q->mn, sizeof(id->mn)) &&
		string_matches(id->fr, q->fr, sizeof(id->fr));
}

static void nvme_init_subnqn(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	size_t nqnlen;
	int off;

	nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
	if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
		strcpy(ctrl->subnqn, id->subnqn);
		return;
	}

	if (ctrl->vs >= NVME_VS(1, 2, 1))
		dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");

	/* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
	off = snprintf(ctrl->subnqn, NVMF_NQN_SIZE,
			"nqn.2014.08.org.nvmexpress:%4x%4x",
			le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
	memcpy(ctrl->subnqn + off, id->sn, sizeof(id->sn));
	off += sizeof(id->sn);
	memcpy(ctrl->subnqn + off, id->mn, sizeof(id->mn));
	off += sizeof(id->mn);
	memset(ctrl->subnqn + off, 0, sizeof(ctrl->subnqn) - off);
}

/*
 * Initialize the cached copies of the Identify data and various controller
 * register in our nvme_ctrl structure.  This should be called as soon as
 * the admin queue is fully up and running.
 */
int nvme_init_identify(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;
	u64 cap;
	int ret, page_shift;
	u32 max_hw_sectors;
	bool prev_apst_enabled;

	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
	if (ret) {
		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
		return ret;
	}

	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
	if (ret) {
		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
		return ret;
	}
	page_shift = NVME_CAP_MPSMIN(cap) + 12;

	if (ctrl->vs >= NVME_VS(1, 1, 0))
		ctrl->subsystem = NVME_CAP_NSSRC(cap);

	ret = nvme_identify_ctrl(ctrl, &id);
	if (ret) {
		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
		return -EIO;
	}

	nvme_init_subnqn(ctrl, id);

	if (!ctrl->identified) {
		/*
		 * Check for quirks.  Quirk can depend on firmware version,
		 * so, in principle, the set of quirks present can change
		 * across a reset.  As a possible future enhancement, we
		 * could re-scan for quirks every time we reinitialize
		 * the device, but we'd have to make sure that the driver
		 * behaves intelligently if the quirks change.
		 */

		int i;

		for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
			if (quirk_matches(id, &core_quirks[i]))
				ctrl->quirks |= core_quirks[i].quirks;
		}
	}

	if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
		dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
		ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
	}

	ctrl->oacs = le16_to_cpu(id->oacs);
	ctrl->vid = le16_to_cpu(id->vid);
	ctrl->oncs = le16_to_cpup(&id->oncs);
	atomic_set(&ctrl->abort_limit, id->acl + 1);
	ctrl->vwc = id->vwc;
	ctrl->cntlid = le16_to_cpup(&id->cntlid);
	memcpy(ctrl->serial, id->sn, sizeof(id->sn));
	memcpy(ctrl->model, id->mn, sizeof(id->mn));
	memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
	if (id->mdts)
		max_hw_sectors = 1 << (id->mdts + page_shift - 9);
	else
		max_hw_sectors = UINT_MAX;
	ctrl->max_hw_sectors =
		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);

	nvme_set_queue_limits(ctrl, ctrl->admin_q);
	ctrl->sgls = le32_to_cpu(id->sgls);
	ctrl->kas = le16_to_cpu(id->kas);

	if (id->rtd3e) {
		/* us -> s */
		u32 transition_time = le32_to_cpu(id->rtd3e) / 1000000;

		ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
						 shutdown_timeout, 60);

		if (ctrl->shutdown_timeout != shutdown_timeout)
			dev_warn(ctrl->device,
				 "Shutdown timeout set to %u seconds\n",
				 ctrl->shutdown_timeout);
	} else
		ctrl->shutdown_timeout = shutdown_timeout;

	ctrl->npss = id->npss;
	ctrl->apsta = id->apsta;
	prev_apst_enabled = ctrl->apst_enabled;
	if (ctrl->quirks & NVME_QUIRK_NO_APST) {
		if (force_apst && id->apsta) {
			dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
			ctrl->apst_enabled = true;
		} else {
			ctrl->apst_enabled = false;
		}
	} else {
		ctrl->apst_enabled = id->apsta;
	}
	memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		ctrl->icdoff = le16_to_cpu(id->icdoff);
		ctrl->ioccsz = le32_to_cpu(id->ioccsz);
		ctrl->iorcsz = le32_to_cpu(id->iorcsz);
		ctrl->maxcmd = le16_to_cpu(id->maxcmd);

		/*
		 * In fabrics we need to verify the cntlid matches the
		 * admin connect
		 */
		if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
			ret = -EINVAL;
			goto out_free;
		}

		if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
			dev_err(ctrl->device,
				"keep-alive support is mandatory for fabrics\n");
			ret = -EINVAL;
			goto out_free;
		}
	} else {
		ctrl->cntlid = le16_to_cpu(id->cntlid);
		ctrl->hmpre = le32_to_cpu(id->hmpre);
		ctrl->hmmin = le32_to_cpu(id->hmmin);
		ctrl->hmminds = le32_to_cpu(id->hmminds);
		ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
	}

	kfree(id);

	if (ctrl->apst_enabled && !prev_apst_enabled)
		dev_pm_qos_expose_latency_tolerance(ctrl->device);
	else if (!ctrl->apst_enabled && prev_apst_enabled)
		dev_pm_qos_hide_latency_tolerance(ctrl->device);

	ret = nvme_configure_apst(ctrl);
	if (ret < 0)
		return ret;

	ret = nvme_configure_timestamp(ctrl);
	if (ret < 0)
		return ret;

	ret = nvme_configure_directives(ctrl);
	if (ret < 0)
		return ret;

	ctrl->identified = true;

	return 0;

out_free:
	kfree(id);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_identify);

static int nvme_dev_open(struct inode *inode, struct file *file)
{
	struct nvme_ctrl *ctrl;
	int instance = iminor(inode);
	int ret = -ENODEV;

	spin_lock(&dev_list_lock);
	list_for_each_entry(ctrl, &nvme_ctrl_list, node) {
		if (ctrl->instance != instance)
			continue;

		if (!ctrl->admin_q) {
			ret = -EWOULDBLOCK;
			break;
		}
		if (!kref_get_unless_zero(&ctrl->kref))
			break;
		file->private_data = ctrl;
		ret = 0;
		break;
	}
	spin_unlock(&dev_list_lock);

	return ret;
}

static int nvme_dev_release(struct inode *inode, struct file *file)
{
	nvme_put_ctrl(file->private_data);
	return 0;
}

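/*
 * Handler for the deprecated NVME_IOCTL_IO_CMD on the controller character
 * device.  The ioctl carries no namespace ID of its own, so it is only
 * well defined when the controller exposes exactly one namespace.
 */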
static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
	struct nvme_ns *ns;
	int ret;

	mutex_lock(&ctrl->namespaces_mutex);
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->device,
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_warn(ctrl->device,
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	kref_get(&ns->kref);
	mutex_unlock(&ctrl->namespaces_mutex);

	ret = nvme_user_cmd(ctrl, ns, argp);
	nvme_put_ns(ns);
	return ret;

out_unlock:
	mutex_unlock(&ctrl->namespaces_mutex);
	return ret;
}

static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct nvme_ctrl *ctrl = file->private_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp);
	case NVME_IOCTL_IO_CMD:
		return nvme_dev_user_cmd(ctrl, argp);
	case NVME_IOCTL_RESET:
		dev_warn(ctrl->device, "resetting controller\n");
		return nvme_reset_ctrl_sync(ctrl);
	case NVME_IOCTL_SUBSYS_RESET:
		return nvme_reset_subsystem(ctrl);
	case NVME_IOCTL_RESCAN:
		nvme_queue_scan(ctrl);
		return 0;
	default:
		return -ENOTTY;
	}
}

static const struct file_operations nvme_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_dev_open,
	.release	= nvme_dev_release,
	.unlocked_ioctl	= nvme_dev_ioctl,
	.compat_ioctl	= nvme_dev_ioctl,
};

static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

	ret = nvme_reset_ctrl_sync(ctrl);
	if (ret < 0)
		return ret;
	return count;
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);

static ssize_t nvme_sysfs_rescan(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	nvme_queue_scan(ctrl);
	return count;
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);

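/*
 * Report a unique identifier for the namespace, preferring (in that order)
 * the Namespace UUID, the NGUID and the EUI-64, and falling back to a
 * string built from the vendor ID, serial number, model and namespace ID.
 */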
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
	struct nvme_ctrl *ctrl = ns->ctrl;
	int serial_len = sizeof(ctrl->serial);
	int model_len = sizeof(ctrl->model);

	if (!uuid_is_null(&ns->uuid))
		return sprintf(buf, "uuid.%pU\n", &ns->uuid);

	if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
		return sprintf(buf, "eui.%16phN\n", ns->nguid);

	if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
		return sprintf(buf, "eui.%8phN\n", ns->eui);

	while (serial_len > 0 && (ctrl->serial[serial_len - 1] == ' ' ||
				  ctrl->serial[serial_len - 1] == '\0'))
		serial_len--;
	while (model_len > 0 && (ctrl->model[model_len - 1] == ' ' ||
				 ctrl->model[model_len - 1] == '\0'))
		model_len--;

	return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
		serial_len, ctrl->serial, model_len, ctrl->model, ns->ns_id);
}
static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);

static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
	return sprintf(buf, "%pU\n", ns->nguid);
}
static DEVICE_ATTR(nguid, S_IRUGO, nguid_show, NULL);

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

	/*
	 * For backward compatibility expose the NGUID to userspace if
	 * we have no UUID set.
	 */
	if (uuid_is_null(&ns->uuid)) {
		printk_ratelimited(KERN_WARNING
				   "No UUID available providing old NGUID\n");
		return sprintf(buf, "%pU\n", ns->nguid);
	}
	return sprintf(buf, "%pU\n", &ns->uuid);
}
static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);

static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
	return sprintf(buf, "%8phd\n", ns->eui);
}
static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL);

static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
	return sprintf(buf, "%d\n", ns->ns_id);
}
static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);

static struct attribute *nvme_ns_attrs[] = {
	&dev_attr_wwid.attr,
	&dev_attr_uuid.attr,
	&dev_attr_nguid.attr,
	&dev_attr_eui.attr,
	&dev_attr_nsid.attr,
	NULL,
};

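/*
 * Hide the uuid/nguid/eui attributes of namespaces that do not report the
 * corresponding identifier.
 */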
static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

	if (a == &dev_attr_uuid.attr) {
		if (uuid_is_null(&ns->uuid) ||
		    !memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
			return 0;
	}
	if (a == &dev_attr_nguid.attr) {
		if (!memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
		if (!memchr_inv(ns->eui, 0, sizeof(ns->eui)))
			return 0;
	}
	return a->mode;
}

static const struct attribute_group nvme_ns_attr_group = {
	.attrs		= nvme_ns_attrs,
	.is_visible	= nvme_ns_attrs_are_visible,
};

#define nvme_show_str_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
	return sprintf(buf, "%.*s\n", (int)sizeof(ctrl->field), ctrl->field);	\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

#define nvme_show_int_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
	return sprintf(buf, "%d\n", ctrl->field);				\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);
nvme_show_int_function(cntlid);

static ssize_t nvme_sysfs_delete(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (device_remove_file_self(dev, attr))
		ctrl->ops->delete_ctrl(ctrl);
	return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);

static ssize_t nvme_sysfs_show_transport(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);

static ssize_t nvme_sysfs_show_state(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	static const char *const state_name[] = {
		[NVME_CTRL_NEW]		= "new",
		[NVME_CTRL_LIVE]	= "live",
		[NVME_CTRL_RESETTING]	= "resetting",
		[NVME_CTRL_RECONNECTING]= "reconnecting",
		[NVME_CTRL_DELETING]	= "deleting",
		[NVME_CTRL_DEAD]	= "dead",
	};

	if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
	    state_name[ctrl->state])
		return sprintf(buf, "%s\n", state_name[ctrl->state]);

	return sprintf(buf, "unknown state\n");
}

static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);

static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subnqn);
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

static ssize_t nvme_sysfs_show_address(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);

static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
	&dev_attr_rescan_controller.attr,
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
	&dev_attr_cntlid.attr,
	&dev_attr_delete_controller.attr,
	&dev_attr_transport.attr,
	&dev_attr_subsysnqn.attr,
	&dev_attr_address.attr,
	&dev_attr_state.attr,
	NULL
};

static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
		return 0;
	if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
		return 0;

	return a->mode;
}

static struct attribute_group nvme_dev_attrs_group = {
	.attrs		= nvme_dev_attrs,
	.is_visible	= nvme_dev_attrs_are_visible,
};

static const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
	NULL,
};

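/* list_sort() comparator: keeps ctrl->namespaces sorted by namespace ID */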
static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
	struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);

	return nsa->ns_id - nsb->ns_id;
}

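/*
 * Look up a namespace by ID and take a reference on it; as the list is
 * kept sorted by ID, the walk can stop at the first higher ID.
 */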
static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns, *ret = NULL;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->ns_id == nsid) {
			kref_get(&ns->kref);
			ret = ns;
			break;
		}
		if (ns->ns_id > nsid)
			break;
	}
	mutex_unlock(&ctrl->namespaces_mutex);
	return ret;
}

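/*
 * Read the per-namespace stream parameters and use the stream write size
 * and granularity to seed the I/O size hints of the namespace's block
 * queue.
 */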
static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns)
{
	struct streams_directive_params s;
	int ret;

	if (!ctrl->nr_streams)
		return 0;

	ret = nvme_get_stream_params(ctrl, &s, ns->ns_id);
	if (ret)
		return ret;

	ns->sws = le32_to_cpu(s.sws);
	ns->sgs = le16_to_cpu(s.sgs);

	if (ns->sws) {
		unsigned int bs = 1 << ns->lba_shift;

		blk_queue_io_min(ns->queue, bs * ns->sws);
		if (ns->sgs)
			blk_queue_io_opt(ns->queue, bs * ns->sws * ns->sgs);
	}

	return 0;
}

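/*
 * Allocate and register a block device for a newly discovered namespace:
 * set up a blk-mq queue on the controller's tagset, identify the
 * namespace, and either hand it to LightNVM or publish a gendisk with the
 * identification sysfs group attached.
 */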
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	struct nvme_id_ns *id;
	char disk_name[DISK_NAME_LEN];
	int node = dev_to_node(ctrl->dev);

	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
	if (!ns)
		return;

	ns->instance = ida_simple_get(&ctrl->ns_ida, 1, 0, GFP_KERNEL);
	if (ns->instance < 0)
		goto out_free_ns;

	ns->queue = blk_mq_init_queue(ctrl->tagset);
	if (IS_ERR(ns->queue))
		goto out_release_instance;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
	ns->queue->queuedata = ns;
	ns->ctrl = ctrl;

	kref_init(&ns->kref);
	ns->ns_id = nsid;
	ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */

	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
	nvme_set_queue_limits(ctrl, ns->queue);
	nvme_setup_streams_ns(ctrl, ns);

	sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->instance);

	id = nvme_identify_ns(ctrl, nsid);
	if (!id)
		goto out_free_queue;

	if (id->ncap == 0)
		goto out_free_id;

	nvme_report_ns_ids(ctrl, ns->ns_id, id, ns->eui, ns->nguid, &ns->uuid);

	if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
		if (nvme_nvm_register(ns, disk_name, node)) {
			dev_warn(ctrl->device, "LightNVM init failure\n");
			goto out_free_id;
		}
	}

	disk = alloc_disk_node(0, node);
	if (!disk)
		goto out_free_id;

	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->flags = GENHD_FL_EXT_DEVT;
	memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
	ns->disk = disk;

	__nvme_revalidate_disk(disk, id);

	mutex_lock(&ctrl->namespaces_mutex);
	list_add_tail(&ns->list, &ctrl->namespaces);
	mutex_unlock(&ctrl->namespaces_mutex);

	kref_get(&ctrl->kref);

	kfree(id);

	device_add_disk(ctrl->device, ns->disk);
	if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_attr_group))
		pr_warn("%s: failed to create sysfs group for identification\n",
			ns->disk->disk_name);
	if (ns->ndev && nvme_nvm_register_sysfs(ns))
		pr_warn("%s: failed to register lightnvm sysfs group for identification\n",
			ns->disk->disk_name);
	return;
 out_free_id:
	kfree(id);
 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_release_instance:
	ida_simple_remove(&ctrl->ns_ida, ns->instance);
 out_free_ns:
	kfree(ns);
}

static void nvme_ns_remove(struct nvme_ns *ns)
{
	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
		return;

	if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
		if (blk_get_integrity(ns->disk))
			blk_integrity_unregister(ns->disk);
		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_attr_group);
		if (ns->ndev)
			nvme_nvm_unregister_sysfs(ns);
		del_gendisk(ns->disk);
		blk_cleanup_queue(ns->queue);
	}

	mutex_lock(&ns->ctrl->namespaces_mutex);
	list_del_init(&ns->list);
	mutex_unlock(&ns->ctrl->namespaces_mutex);

	nvme_put_ns(ns);
}

static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;

	ns = nvme_find_get_ns(ctrl, nsid);
	if (ns) {
		if (ns->disk && revalidate_disk(ns->disk))
			nvme_ns_remove(ns);
		nvme_put_ns(ns);
	} else
		nvme_alloc_ns(ctrl, nsid);
}

static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					unsigned nsid)
{
	struct nvme_ns *ns, *next;

	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
		if (ns->ns_id > nsid)
			nvme_ns_remove(ns);
	}
}

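/*
 * Scan using the Identify "active namespace ID list", which returns up to
 * 1024 namespace IDs per 4k page.  Each reported ID is (re)validated, and
 * namespaces falling in the gaps between reported IDs, as well as
 * everything past the last reported ID, are removed.
 */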
static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
{
	struct nvme_ns *ns;
	__le32 *ns_list;
	unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024);
	int ret = 0;

	ns_list = kzalloc(0x1000, GFP_KERNEL);
	if (!ns_list)
		return -ENOMEM;

	for (i = 0; i < num_lists; i++) {
		ret = nvme_identify_ns_list(ctrl, prev, ns_list);
		if (ret)
			goto free;

		for (j = 0; j < min(nn, 1024U); j++) {
			nsid = le32_to_cpu(ns_list[j]);
			if (!nsid)
				goto out;

			nvme_validate_ns(ctrl, nsid);

			while (++prev < nsid) {
				ns = nvme_find_get_ns(ctrl, prev);
				if (ns) {
					nvme_ns_remove(ns);
					nvme_put_ns(ns);
				}
			}
		}
		nn -= j;
	}
 out:
	nvme_remove_invalid_namespaces(ctrl, prev);
 free:
	kfree(ns_list);
	return ret;
}

static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
{
	unsigned i;

	for (i = 1; i <= nn; i++)
		nvme_validate_ns(ctrl, i);

	nvme_remove_invalid_namespaces(ctrl, nn);
}

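/*
 * Namespace scanning worker: re-reads the controller's namespace count
 * and prefers list based scanning on NVMe 1.1+ controllers, falling back
 * to probing every namespace ID sequentially.  The namespace list is
 * re-sorted afterwards so that newly attached entries end up in ID order.
 */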
static void nvme_scan_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, scan_work);
	struct nvme_id_ctrl *id;
	unsigned nn;

	if (ctrl->state != NVME_CTRL_LIVE)
		return;

	if (nvme_identify_ctrl(ctrl, &id))
		return;

	nn = le32_to_cpu(id->nn);
	if (ctrl->vs >= NVME_VS(1, 1, 0) &&
	    !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
		if (!nvme_scan_ns_list(ctrl, nn))
			goto done;
	}
	nvme_scan_ns_sequential(ctrl, nn);
 done:
	mutex_lock(&ctrl->namespaces_mutex);
	list_sort(NULL, &ctrl->namespaces, ns_cmp);
	mutex_unlock(&ctrl->namespaces_mutex);
	kfree(id);
}

void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Do not queue new scan work when a controller is reset during
	 * removal.
	 */
	if (ctrl->state == NVME_CTRL_LIVE)
		queue_work(nvme_wq, &ctrl->scan_work);
}
EXPORT_SYMBOL_GPL(nvme_queue_scan);

/*
 * This function iterates the namespace list unlocked to allow recovery from
 * controller failure. It is up to the caller to ensure the namespace list is
 * not modified by scan work while this function is executing.
 */
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns, *next;

	/*
	 * The dead state indicates the controller was not gracefully
	 * disconnected. In that case, we won't be able to flush any data while
	 * removing the namespaces' disks; fail all the queues now to avoid
	 * potentially having to clean up the failed sync later.
	 */
	if (ctrl->state == NVME_CTRL_DEAD)
		nvme_kill_queues(ctrl);

	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
		nvme_ns_remove(ns);
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);

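/*
 * Resubmit Asynchronous Event Requests until the per-controller limit is
 * reached again, dropping ctrl->lock around the actual submission.
 */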
static void nvme_async_event_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, async_event_work);

	spin_lock_irq(&ctrl->lock);
	while (ctrl->state == NVME_CTRL_LIVE && ctrl->event_limit > 0) {
		int aer_idx = --ctrl->event_limit;

		spin_unlock_irq(&ctrl->lock);
		ctrl->ops->submit_async_event(ctrl, aer_idx);
		spin_lock_irq(&ctrl->lock);
	}
	spin_unlock_irq(&ctrl->lock);
}

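/*
 * Returns true while the controller is enabled and reports Processing
 * Paused (CSTS.PP), e.g. during a firmware activation.  An all-ones CSTS
 * value is taken to mean the register is no longer readable.
 */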
static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
{
	u32 csts;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
		return false;

	if (csts == ~0)
		return false;

	return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
}

static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
{
	struct nvme_command c = { };
	struct nvme_fw_slot_info_log *log;

	log = kmalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return;

	c.common.opcode = nvme_admin_get_log_page;
	c.common.nsid = cpu_to_le32(NVME_NSID_ALL);
	c.common.cdw10[0] = nvme_get_log_dw10(NVME_LOG_FW_SLOT, sizeof(*log));

	if (!nvme_submit_sync_cmd(ctrl->admin_q, &c, log, sizeof(*log)))
		dev_warn(ctrl->device,
				"Get FW SLOT INFO log error\n");
	kfree(log);
}

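/*
 * Wait for a firmware activation to complete: I/O is quiesced while the
 * controller reports Processing Paused.  The wait is bounded by MTFA (in
 * 100ms units) when the controller reports one, otherwise by the admin
 * command timeout, after which the controller is reset.
 */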
static void nvme_fw_act_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work,
				struct nvme_ctrl, fw_act_work);
	unsigned long fw_act_timeout;

	if (ctrl->mtfa)
		fw_act_timeout = jiffies +
				msecs_to_jiffies(ctrl->mtfa * 100);
	else
		fw_act_timeout = jiffies +
				msecs_to_jiffies(admin_timeout * 1000);

	nvme_stop_queues(ctrl);
	while (nvme_ctrl_pp_status(ctrl)) {
		if (time_after(jiffies, fw_act_timeout)) {
			dev_warn(ctrl->device,
				"Fw activation timeout, reset controller\n");
			nvme_reset_ctrl(ctrl);
			break;
		}
		msleep(100);
	}

	if (ctrl->state != NVME_CTRL_LIVE)
		return;

	nvme_start_queues(ctrl);
	/* read FW slot information to clear the AER */
	nvme_get_fw_slot_info(ctrl);
}

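/*
 * Completion path for Asynchronous Event Requests: a successful (or
 * aborted) AER frees up an event slot for resubmission, and the result of
 * a successful event is decoded to kick off a namespace rescan or firmware
 * activation handling.
 */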
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		union nvme_result *res)
{
	u32 result = le32_to_cpu(res->u32);
	bool done = true;

	switch (le16_to_cpu(status) >> 1) {
	case NVME_SC_SUCCESS:
		done = false;
		/* FALLTHRU */
	case NVME_SC_ABORT_REQ:
		++ctrl->event_limit;
		if (ctrl->state == NVME_CTRL_LIVE)
			queue_work(nvme_wq, &ctrl->async_event_work);
		break;
	default:
		break;
	}

	if (done)
		return;

	switch (result & 0xff07) {
	case NVME_AER_NOTICE_NS_CHANGED:
		dev_info(ctrl->device, "rescanning\n");
		nvme_queue_scan(ctrl);
		break;
	case NVME_AER_NOTICE_FW_ACT_STARTING:
		queue_work(nvme_wq, &ctrl->fw_act_work);
		break;
	default:
		dev_warn(ctrl->device, "async event result %08x\n", result);
	}
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);

void nvme_queue_async_events(struct nvme_ctrl *ctrl)
{
	ctrl->event_limit = NVME_NR_AERS;
	queue_work(nvme_wq, &ctrl->async_event_work);
}
EXPORT_SYMBOL_GPL(nvme_queue_async_events);

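/*
 * Controller instance numbers (the N in /dev/nvmeN) come from an IDA; the
 * ida_pre_get()/ida_get_new() loop retries whenever the preloaded memory
 * was consumed by a concurrent allocation.
 */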
static DEFINE_IDA(nvme_instance_ida);

static int nvme_set_instance(struct nvme_ctrl *ctrl)
{
	int instance, error;

	do {
		if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
			return -ENODEV;

		spin_lock(&dev_list_lock);
		error = ida_get_new(&nvme_instance_ida, &instance);
		spin_unlock(&dev_list_lock);
	} while (error == -EAGAIN);

	if (error)
		return -ENODEV;

	ctrl->instance = instance;
	return 0;
}

static void nvme_release_instance(struct nvme_ctrl *ctrl)
{
	spin_lock(&dev_list_lock);
	ida_remove(&nvme_instance_ida, ctrl->instance);
	spin_unlock(&dev_list_lock);
}

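/*
 * Quiesce the controller's background machinery: keep-alive is stopped
 * first so no new keep-alive commands are issued while the async event,
 * scan and firmware activation work are flushed out.
 */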
void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_stop_keep_alive(ctrl);
	flush_work(&ctrl->async_event_work);
	flush_work(&ctrl->scan_work);
	cancel_work_sync(&ctrl->fw_act_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_ctrl);

void nvme_start_ctrl(struct nvme_ctrl *ctrl)
{
	if (ctrl->kato)
		nvme_start_keep_alive(ctrl);

	if (ctrl->queue_count > 1) {
		nvme_queue_scan(ctrl);
		nvme_queue_async_events(ctrl);
		nvme_start_queues(ctrl);
	}
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);

void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
	device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));

	spin_lock(&dev_list_lock);
	list_del(&ctrl->node);
	spin_unlock(&dev_list_lock);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);

static void nvme_free_ctrl(struct kref *kref)
{
	struct nvme_ctrl *ctrl = container_of(kref, struct nvme_ctrl, kref);

	put_device(ctrl->device);
	nvme_release_instance(ctrl);
	ida_destroy(&ctrl->ns_ida);

	ctrl->ops->free_ctrl(ctrl);
}

void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	kref_put(&ctrl->kref, nvme_free_ctrl);
}
EXPORT_SYMBOL_GPL(nvme_put_ctrl);

/*
 * Initialize an NVMe controller structure.  This needs to be called during
 * the earliest initialization so that we have the initialized structure
 * around during probing.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

	ctrl->state = NVME_CTRL_NEW;
	spin_lock_init(&ctrl->lock);
	INIT_LIST_HEAD(&ctrl->namespaces);
	mutex_init(&ctrl->namespaces_mutex);
	kref_init(&ctrl->kref);
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;
	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
	INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);

	ret = nvme_set_instance(ctrl);
	if (ret)
		goto out;

	ctrl->device = device_create_with_groups(nvme_class, ctrl->dev,
				MKDEV(nvme_char_major, ctrl->instance),
				ctrl, nvme_dev_attr_groups,
				"nvme%d", ctrl->instance);
	if (IS_ERR(ctrl->device)) {
		ret = PTR_ERR(ctrl->device);
		goto out_release_instance;
	}
	get_device(ctrl->device);
	ida_init(&ctrl->ns_ida);

	spin_lock(&dev_list_lock);
	list_add_tail(&ctrl->node, &nvme_ctrl_list);
	spin_unlock(&dev_list_lock);

	/*
	 * Initialize latency tolerance controls.  The sysfs files won't
	 * be visible to userspace unless the device actually supports APST.
	 */
	ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
		min(default_ps_max_latency_us, (unsigned long)S32_MAX));

	return 0;
out_release_instance:
	nvme_release_instance(ctrl);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);

/**
 * nvme_kill_queues(): Ends all namespace queues
 * @ctrl: the dead controller that needs to end
 *
 * Call this function when the driver determines it is unable to get the
 * controller in a state capable of servicing IO.
 */
void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);

	/* Forcibly unquiesce queues to avoid blocking dispatch */
	if (ctrl->admin_q)
		blk_mq_unquiesce_queue(ctrl->admin_q);

	list_for_each_entry(ns, &ctrl->namespaces, list) {
		/*
		 * Revalidating a dead namespace sets capacity to 0. This will
		 * end buffered writers dirtying pages that can't be synced.
		 */
		if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
			continue;
		revalidate_disk(ns->disk);
		blk_set_queue_dying(ns->queue);

		/* Forcibly unquiesce queues to avoid blocking dispatch */
		blk_mq_unquiesce_queue(ns->queue);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);

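/*
 * The four helpers below apply the blk-mq queue freeze protocol to all of
 * a controller's namespaces: nvme_start_freeze() begins the freeze,
 * nvme_wait_freeze()/nvme_wait_freeze_timeout() wait for in-flight I/O to
 * drain, and nvme_unfreeze() releases the queues again, e.g. around a
 * controller reset.
 */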
void nvme_unfreeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unfreeze_queue(ns->queue);
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);

void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
		if (timeout <= 0)
			break;
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);

void nvme_wait_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_freeze_queue_wait(ns->queue);
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze);

void nvme_start_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_freeze_queue_start(ns->queue);
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);

void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_quiesce_queue(ns->queue);
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);

void nvme_start_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unquiesce_queue(ns->queue);
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);

int nvme_reinit_tagset(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set)
{
	if (!ctrl->ops->reinit_request)
		return 0;

	return blk_mq_tagset_iter(set, set->driver_data,
			ctrl->ops->reinit_request);
}
EXPORT_SYMBOL_GPL(nvme_reinit_tagset);

int __init nvme_core_init(void)
{
	int result;

	nvme_wq = alloc_workqueue("nvme-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_wq)
		return -ENOMEM;

	result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
							&nvme_dev_fops);
	if (result < 0)
		goto destroy_wq;
	else if (result > 0)
		nvme_char_major = result;

	nvme_class = class_create(THIS_MODULE, "nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}

	return 0;

unregister_chrdev:
	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
destroy_wq:
	destroy_workqueue(nvme_wq);
	return result;
}

void nvme_core_exit(void)
{
	class_destroy(nvme_class);
	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
	destroy_workqueue(nvme_wq);
}

MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_core_init);
module_exit(nvme_core_exit);