/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/aer.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/once.h>
#include <linux/pci.h>
#include <linux/t10-pi.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/sed-opal.h>

#include "nvme.h"

#define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))

#define SGES_PER_PAGE	(PAGE_SIZE / sizeof(struct nvme_sgl_desc))

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static bool use_cmb_sqes = true;
module_param(use_cmb_sqes, bool, 0644);
MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");

static unsigned int max_host_mem_size_mb = 128;
module_param(max_host_mem_size_mb, uint, 0444);
MODULE_PARM_DESC(max_host_mem_size_mb,
	"Maximum Host Memory Buffer (HMB) size per controller (in MiB)");

static unsigned int sgl_threshold = SZ_32K;
module_param(sgl_threshold, uint, 0644);
MODULE_PARM_DESC(sgl_threshold,
		"Use SGLs when average request segment size is larger or equal to "
		"this size. Use 0 to disable SGLs.");

static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops io_queue_depth_ops = {
	.set = io_queue_depth_set,
	.get = param_get_int,
};

static int io_queue_depth = 1024;
module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2");

struct nvme_dev;
struct nvme_queue;

static void nvme_process_cq(struct nvme_queue *nvmeq);
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct nvme_queue *queues;
	struct blk_mq_tag_set tagset;
	struct blk_mq_tag_set admin_tagset;
	u32 __iomem *dbs;
	struct device *dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	unsigned online_queues;
	unsigned max_qid;
	int q_depth;
	u32 db_stride;
	void __iomem *bar;
	unsigned long bar_mapped_size;
	struct work_struct remove_work;
	struct mutex shutdown_lock;
	bool subsystem;
	void __iomem *cmb;
	pci_bus_addr_t cmb_bus_addr;
	u64 cmb_size;
	u32 cmbsz;
	u32 cmbloc;
	struct nvme_ctrl ctrl;
	struct completion ioq_wait;

	/* shadow doorbell buffer support: */
	u32 *dbbuf_dbs;
	dma_addr_t dbbuf_dbs_dma_addr;
	u32 *dbbuf_eis;
	dma_addr_t dbbuf_eis_dma_addr;

	/* host memory buffer support: */
	u64 host_mem_size;
	u32 nr_host_mem_descs;
	dma_addr_t host_mem_descs_dma;
	struct nvme_host_mem_buf_desc *host_mem_descs;
	void **host_mem_desc_bufs;
};

static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
{
	int n = 0, ret;

	ret = kstrtoint(val, 10, &n);
	if (ret != 0 || n < 2)
		return -EINVAL;

	return param_set_int(val, kp);
}

static inline unsigned int sq_idx(unsigned int qid, u32 stride)
{
	return qid * 2 * stride;
}

static inline unsigned int cq_idx(unsigned int qid, u32 stride)
{
	return (qid * 2 + 1) * stride;
}

static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_dev, ctrl);
}

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct device *q_dmadev;
	struct nvme_dev *dev;
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	struct nvme_command __iomem *sq_cmds_io;
	volatile struct nvme_completion *cqes;
	struct blk_mq_tags **tags;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	u32 __iomem *q_db;
	u16 q_depth;
	s16 cq_vector;
	u16 sq_tail;
	u16 cq_head;
	u16 qid;
	u8 cq_phase;
	u8 cqe_seen;
	u32 *dbbuf_sq_db;
	u32 *dbbuf_cq_db;
	u32 *dbbuf_sq_ei;
	u32 *dbbuf_cq_ei;
};

/*
 * The nvme_iod describes the data in an I/O, including the list of PRP
 * entries.  You can't see it in this data structure because C doesn't let
 * me express that.  Use nvme_init_iod to ensure there's enough space
 * allocated to store the PRP list.
 */
struct nvme_iod {
	struct nvme_request req;
	struct nvme_queue *nvmeq;
	bool use_sgl;
	int aborted;
	int npages;		/* In the PRP list. 0 means small pool in use */
	int nents;		/* Used in scatterlist */
	int length;		/* Of data, in bytes */
	dma_addr_t first_dma;
	struct scatterlist meta_sg; /* metadata requires single contiguous buffer */
	struct scatterlist *sg;
	struct scatterlist inline_sg[0];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
}

static inline unsigned int nvme_dbbuf_size(u32 stride)
{
	return ((num_possible_cpus() + 1) * 8 * stride);
}

static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);

	if (dev->dbbuf_dbs)
		return 0;

	dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_dbs_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_dbs)
		return -ENOMEM;
	dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_eis_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);

	if (dev->dbbuf_dbs) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
	}
	if (dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_eis, dev->dbbuf_eis_dma_addr);
		dev->dbbuf_eis = NULL;
	}
}

static void nvme_dbbuf_init(struct nvme_dev *dev,
			    struct nvme_queue *nvmeq, int qid)
{
	if (!dev->dbbuf_dbs || !qid)
		return;

	nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
}

static void nvme_dbbuf_set(struct nvme_dev *dev)
{
	struct nvme_command c;

	if (!dev->dbbuf_dbs)
		return;

	memset(&c, 0, sizeof(c));
	c.dbbuf.opcode = nvme_admin_dbbuf;
	c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
	c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);

	if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
		dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
		/* Free memory and continue on */
		nvme_dbbuf_dma_free(dev);
	}
}

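/*
 * Explanatory note (added): the event-index test below uses 16-bit
 * wraparound arithmetic.  It returns true only when event_idx lies in
 * the window [old, new_idx), i.e. the doorbell value being written has
 * just moved past the event index the controller asked about.
 * Worked example with hypothetical values: old = 10, new_idx = 14,
 * event_idx = 12 gives (u16)(14 - 12 - 1) = 1 < (u16)(14 - 10) = 4,
 * so the caller must still issue the MMIO doorbell write.
 */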
static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
{
	return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
}

/* Update dbbuf and return true if an MMIO is required */
static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
					      volatile u32 *dbbuf_ei)
{
	if (dbbuf_db) {
		u16 old_value;

		/*
		 * Ensure that the queue is written before updating
		 * the doorbell in memory
		 */
		wmb();

		old_value = *dbbuf_db;
		*dbbuf_db = value;

		if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
			return false;
	}

	return true;
}

/*
 * Max size of iod being embedded in the request payload
 */
#define NVME_INT_PAGES		2
#define NVME_INT_BYTES(dev)	(NVME_INT_PAGES * (dev)->ctrl.page_size)

/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_npages(unsigned size, struct nvme_dev *dev)
{
	unsigned nprps = DIV_ROUND_UP(size + dev->ctrl.page_size,
				      dev->ctrl.page_size);
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}

/*
 * Calculates the number of pages needed for the SGL segments. For example a 4k
 * page can accommodate 256 SGL descriptors.
 */
static int nvme_pci_npages_sgl(unsigned int num_seg)
{
	return DIV_ROUND_UP(num_seg * sizeof(struct nvme_sgl_desc), PAGE_SIZE);
}

static unsigned int nvme_pci_iod_alloc_size(struct nvme_dev *dev,
		unsigned int size, unsigned int nseg, bool use_sgl)
{
	size_t alloc_size;

	if (use_sgl)
		alloc_size = sizeof(__le64 *) * nvme_pci_npages_sgl(nseg);
	else
		alloc_size = sizeof(__le64 *) * nvme_npages(size, dev);

	return alloc_size + sizeof(struct scatterlist) * nseg;
}

static unsigned int nvme_pci_cmd_size(struct nvme_dev *dev, bool use_sgl)
{
	unsigned int alloc_size = nvme_pci_iod_alloc_size(dev,
				    NVME_INT_BYTES(dev), NVME_INT_PAGES,
				    use_sgl);

	return sizeof(struct nvme_iod) + alloc_size;
}

static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = &dev->queues[0];

	WARN_ON(hctx_idx != 0);
	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
	WARN_ON(nvmeq->tags);

	hctx->driver_data = nvmeq;
	nvmeq->tags = &dev->admin_tagset.tags[0];
	return 0;
}

static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	nvmeq->tags = NULL;
}

static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];

	if (!nvmeq->tags)
		nvmeq->tags = &dev->tagset.tags[hctx_idx];

	WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
	hctx->driver_data = nvmeq;
	return 0;
}

static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_dev *dev = set->driver_data;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
	struct nvme_queue *nvmeq = &dev->queues[queue_idx];

	BUG_ON(!nvmeq);
	iod->nvmeq = nvmeq;
	return 0;
}

static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_dev *dev = set->driver_data;

	return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev));
}

/**
 * __nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
						struct nvme_command *cmd)
{
	u16 tail = nvmeq->sq_tail;

	if (nvmeq->sq_cmds_io)
		memcpy_toio(&nvmeq->sq_cmds_io[tail], cmd, sizeof(*cmd));
	else
		memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));

	if (++tail == nvmeq->q_depth)
		tail = 0;
	if (nvme_dbbuf_update_and_check_event(tail, nvmeq->dbbuf_sq_db,
					      nvmeq->dbbuf_sq_ei))
		writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
}

static void **nvme_pci_iod_list(struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
}

static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	int nseg = blk_rq_nr_phys_segments(req);
	unsigned int avg_seg_size;

	if (nseg == 0)
		return false;

	avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);

	if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
		return false;
	if (!iod->nvmeq->qid)
		return false;
	if (!sgl_threshold || avg_seg_size < sgl_threshold)
		return false;
	return true;
}

static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
	int nseg = blk_rq_nr_phys_segments(rq);
	unsigned int size = blk_rq_payload_bytes(rq);

	iod->use_sgl = nvme_pci_use_sgls(dev, rq);

	if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
		size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg,
				iod->use_sgl);

		iod->sg = kmalloc(alloc_size, GFP_ATOMIC);
		if (!iod->sg)
			return BLK_STS_RESOURCE;
	} else {
		iod->sg = iod->inline_sg;
	}

	iod->aborted = 0;
	iod->npages = -1;
	iod->nents = 0;
	iod->length = size;

	return BLK_STS_OK;
}

static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	const int last_prp = dev->ctrl.page_size / sizeof(__le64) - 1;
	dma_addr_t dma_addr = iod->first_dma, next_dma_addr;

	int i;

	if (iod->npages == 0)
		dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
			dma_addr);

	for (i = 0; i < iod->npages; i++) {
		void *addr = nvme_pci_iod_list(req)[i];

		if (iod->use_sgl) {
			struct nvme_sgl_desc *sg_list = addr;

			next_dma_addr =
			    le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
		} else {
			__le64 *prp_list = addr;

			next_dma_addr = le64_to_cpu(prp_list[last_prp]);
		}

		dma_pool_free(dev->prp_page_pool, addr, dma_addr);
		dma_addr = next_dma_addr;
	}

	if (iod->sg != iod->inline_sg)
		kfree(iod->sg);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
{
	if (be32_to_cpu(pi->ref_tag) == v)
		pi->ref_tag = cpu_to_be32(p);
}

static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
{
	if (be32_to_cpu(pi->ref_tag) == p)
		pi->ref_tag = cpu_to_be32(v);
}

/**
 * nvme_dif_remap - remaps ref tags to bip seed and physical lba
 *
 * The virtual start sector is the one that was originally submitted by the
 * block layer.	Due to partitioning, MD/DM cloning, etc. the actual physical
 * start sector may be different. Remap protection information to match the
 * physical LBA on writes, and back to the original seed on reads.
 *
 * Type 0 and 3 do not have a ref tag, so no remapping required.
 */
static void nvme_dif_remap(struct request *req,
			void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
{
	struct nvme_ns *ns = req->rq_disk->private_data;
	struct bio_integrity_payload *bip;
	struct t10_pi_tuple *pi;
	void *p, *pmap;
	u32 i, nlb, ts, phys, virt;

	if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3)
		return;

	bip = bio_integrity(req->bio);
	if (!bip)
		return;

	pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;

	p = pmap;
	virt = bip_get_seed(bip);
	phys = nvme_block_nr(ns, blk_rq_pos(req));
	nlb = (blk_rq_bytes(req) >> ns->lba_shift);
	ts = ns->disk->queue->integrity.tuple_size;

	for (i = 0; i < nlb; i++, virt++, phys++) {
		pi = (struct t10_pi_tuple *)p;
		dif_swap(phys, virt, pi);
		p += ts;
	}
	kunmap_atomic(pmap);
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
static void nvme_dif_remap(struct request *req,
			void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
{
}
static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
{
}
static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
{
}
#endif

static void nvme_print_sgl(struct scatterlist *sgl, int nents)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		dma_addr_t phys = sg_phys(sg);
		pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
			"dma_address:%pad dma_length:%d\n",
			i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
			sg_dma_len(sg));
	}
}

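/*
 * Explanatory note (added): the PRP setup below follows the NVMe PRP
 * rules: dptr.prp1 points at the first (possibly page-offset) data page
 * and dptr.prp2 either points at the second page (for transfers ending
 * within two pages) or at a PRP list.  When a list page fills up, its
 * last slot is repurposed as a pointer to the next list page, which is
 * why the loop copies the last entry forward before chaining.
 */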
static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	int length = blk_rq_payload_bytes(req);
	struct scatterlist *sg = iod->sg;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	u32 page_size = dev->ctrl.page_size;
	int offset = dma_addr & (page_size - 1);
	__le64 *prp_list;
	void **list = nvme_pci_iod_list(req);
	dma_addr_t prp_dma;
	int nprps, i;

	length -= (page_size - offset);
	if (length <= 0) {
		iod->first_dma = 0;
		goto done;
	}

	dma_len -= (page_size - offset);
	if (dma_len) {
		dma_addr += (page_size - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= page_size) {
		iod->first_dma = dma_addr;
		goto done;
	}

	nprps = DIV_ROUND_UP(length, page_size);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
	if (!prp_list) {
		iod->first_dma = dma_addr;
		iod->npages = -1;
		return BLK_STS_RESOURCE;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;
	i = 0;
	for (;;) {
		if (i == page_size >> 3) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
			if (!prp_list)
				return BLK_STS_RESOURCE;
			list[iod->npages++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= page_size;
		dma_addr += page_size;
		length -= page_size;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		if (unlikely(dma_len < 0))
			goto bad_sgl;
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

done:
	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);

	return BLK_STS_OK;

 bad_sgl:
	WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
			"Invalid SGL for payload:%d nents:%d\n",
			blk_rq_payload_bytes(req), iod->nents);
	return BLK_STS_IOERR;
}

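/*
 * Explanatory note (added): the SGL path below builds a chain of
 * nvme_sgl_desc pages.  Each data descriptor covers one DMA-mapped
 * segment; when a descriptor page fills up, its last slot is turned
 * into a segment descriptor pointing at the next page (marked as a
 * last-segment descriptor once the remaining entries fit).  A request
 * with a single segment is described inline in the command instead.
 */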
static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge,
		struct scatterlist *sg)
{
	sge->addr = cpu_to_le64(sg_dma_address(sg));
	sge->length = cpu_to_le32(sg_dma_len(sg));
	sge->type = NVME_SGL_FMT_DATA_DESC << 4;
}

static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
		dma_addr_t dma_addr, int entries)
{
	sge->addr = cpu_to_le64(dma_addr);
	if (entries < SGES_PER_PAGE) {
		sge->length = cpu_to_le32(entries * sizeof(*sge));
		sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
	} else {
		sge->length = cpu_to_le32(PAGE_SIZE);
		sge->type = NVME_SGL_FMT_SEG_DESC << 4;
	}
}

static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmd, int entries)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	struct nvme_sgl_desc *sg_list;
	struct scatterlist *sg = iod->sg;
	dma_addr_t sgl_dma;
	int i = 0;

	/* setting the transfer type as SGL */
	cmd->flags = NVME_CMD_SGL_METABUF;

	if (entries == 1) {
		nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
		return BLK_STS_OK;
	}

	if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
	if (!sg_list) {
		iod->npages = -1;
		return BLK_STS_RESOURCE;
	}

	nvme_pci_iod_list(req)[0] = sg_list;
	iod->first_dma = sgl_dma;

	nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);

	do {
		if (i == SGES_PER_PAGE) {
			struct nvme_sgl_desc *old_sg_desc = sg_list;
			struct nvme_sgl_desc *link = &old_sg_desc[i - 1];

			sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
			if (!sg_list)
				return BLK_STS_RESOURCE;

			i = 0;
			nvme_pci_iod_list(req)[iod->npages++] = sg_list;
			sg_list[i++] = *link;
			nvme_pci_sgl_set_seg(link, sgl_dma, entries);
		}

		nvme_pci_sgl_set_data(&sg_list[i++], sg);
		sg = sg_next(sg);
	} while (--entries > 0);

	return BLK_STS_OK;
}

static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct request_queue *q = req->q;
	enum dma_data_direction dma_dir = rq_data_dir(req) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;
	blk_status_t ret = BLK_STS_IOERR;
	int nr_mapped;

	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
	iod->nents = blk_rq_map_sg(q, req, iod->sg);
	if (!iod->nents)
		goto out;

	ret = BLK_STS_RESOURCE;
	nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
			DMA_ATTR_NO_WARN);
	if (!nr_mapped)
		goto out;

	if (iod->use_sgl)
		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
	else
		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);

	if (ret != BLK_STS_OK)
		goto out_unmap;

	ret = BLK_STS_IOERR;
	if (blk_integrity_rq(req)) {
		if (blk_rq_count_integrity_sg(q, req->bio) != 1)
			goto out_unmap;

		sg_init_table(&iod->meta_sg, 1);
		if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1)
			goto out_unmap;

		if (req_op(req) == REQ_OP_WRITE)
			nvme_dif_remap(req, nvme_dif_prep);

		if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
			goto out_unmap;
	}

	if (blk_integrity_rq(req))
		cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
	return BLK_STS_OK;

out_unmap:
	dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
out:
	return ret;
}

static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	enum dma_data_direction dma_dir = rq_data_dir(req) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;

	if (iod->nents) {
		dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
		if (blk_integrity_rq(req)) {
			if (req_op(req) == REQ_OP_READ)
				nvme_dif_remap(req, nvme_dif_complete);
			dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
		}
	}

	nvme_cleanup_cmd(req);
	nvme_free_iod(dev, req);
}

/*
 * NOTE: ns is NULL when called on the admin queue.
 */
static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_queue *nvmeq = hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *req = bd->rq;
	struct nvme_command cmnd;
	blk_status_t ret;

	ret = nvme_setup_cmd(ns, req, &cmnd);
	if (ret)
		return ret;

	ret = nvme_init_iod(req, dev);
	if (ret)
		goto out_free_cmd;

	if (blk_rq_nr_phys_segments(req)) {
		ret = nvme_map_data(dev, req, &cmnd);
		if (ret)
			goto out_cleanup_iod;
	}

	blk_mq_start_request(req);

	spin_lock_irq(&nvmeq->q_lock);
	if (unlikely(nvmeq->cq_vector < 0)) {
		ret = BLK_STS_IOERR;
		spin_unlock_irq(&nvmeq->q_lock);
		goto out_cleanup_iod;
	}
	__nvme_submit_cmd(nvmeq, &cmnd);
	nvme_process_cq(nvmeq);
	spin_unlock_irq(&nvmeq->q_lock);
	return BLK_STS_OK;
out_cleanup_iod:
	nvme_free_iod(dev, req);
out_free_cmd:
	nvme_cleanup_cmd(req);
	return ret;
}

static void nvme_pci_complete_rq(struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_unmap_data(iod->nvmeq->dev, req);
	nvme_complete_rq(req);
}

/* We read the CQE phase first to check if the rest of the entry is valid */
static inline bool nvme_cqe_valid(struct nvme_queue *nvmeq, u16 head,
		u16 phase)
{
	return (le16_to_cpu(nvmeq->cqes[head].status) & 1) == phase;
}

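/*
 * Explanatory note (added): completion handling below relies on the CQ
 * phase bit: the controller inverts the phase of valid entries on each
 * pass through the ring, so an entry is new only while its phase
 * matches nvmeq->cq_phase.  cq_head is advanced per entry, cq_phase is
 * flipped on wrap, and the CQ head doorbell is written once per batch
 * of consumed completions.
 */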
static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
{
	u16 head = nvmeq->cq_head;

	if (likely(nvmeq->cq_vector >= 0)) {
		if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
						      nvmeq->dbbuf_cq_ei))
			writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
	}
}

static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
		struct nvme_completion *cqe)
{
	struct request *req;

	if (unlikely(cqe->command_id >= nvmeq->q_depth)) {
		dev_warn(nvmeq->dev->ctrl.device,
			"invalid id %d completed on queue %d\n",
			cqe->command_id, le16_to_cpu(cqe->sq_id));
		return;
	}

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvmeq->qid == 0 &&
			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
		nvme_complete_async_event(&nvmeq->dev->ctrl,
				cqe->status, &cqe->result);
		return;
	}

	nvmeq->cqe_seen = 1;
	req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
	nvme_end_request(req, cqe->status, cqe->result);
}

static inline bool nvme_read_cqe(struct nvme_queue *nvmeq,
		struct nvme_completion *cqe)
{
	if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) {
		*cqe = nvmeq->cqes[nvmeq->cq_head];

		if (++nvmeq->cq_head == nvmeq->q_depth) {
			nvmeq->cq_head = 0;
			nvmeq->cq_phase = !nvmeq->cq_phase;
		}
		return true;
	}
	return false;
}

static void nvme_process_cq(struct nvme_queue *nvmeq)
{
	struct nvme_completion cqe;
	int consumed = 0;

	while (nvme_read_cqe(nvmeq, &cqe)) {
		nvme_handle_cqe(nvmeq, &cqe);
		consumed++;
	}

	if (consumed)
		nvme_ring_cq_doorbell(nvmeq);
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	irqreturn_t result;
	struct nvme_queue *nvmeq = data;
	spin_lock(&nvmeq->q_lock);
	nvme_process_cq(nvmeq);
	result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE;
	nvmeq->cqe_seen = 0;
	spin_unlock(&nvmeq->q_lock);
	return result;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase))
		return IRQ_WAKE_THREAD;
	return IRQ_NONE;
}

static int __nvme_poll(struct nvme_queue *nvmeq, unsigned int tag)
{
	struct nvme_completion cqe;
	int found = 0, consumed = 0;

	if (!nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase))
		return 0;

	spin_lock_irq(&nvmeq->q_lock);
	while (nvme_read_cqe(nvmeq, &cqe)) {
		nvme_handle_cqe(nvmeq, &cqe);
		consumed++;

		if (tag == cqe.command_id) {
			found = 1;
			break;
		}
	}

	if (consumed)
		nvme_ring_cq_doorbell(nvmeq);
	spin_unlock_irq(&nvmeq->q_lock);

	return found;
}

static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	return __nvme_poll(nvmeq, tag);
}

static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
{
	struct nvme_dev *dev = to_nvme_dev(ctrl);
	struct nvme_queue *nvmeq = &dev->queues[0];
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_async_event;
	c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;

	spin_lock_irq(&nvmeq->q_lock);
	__nvme_submit_cmd(nvmeq, &c);
	spin_unlock_irq(&nvmeq->q_lock);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	/*
	 * Note: we (ab)use the fact that the prp fields survive if no data
	 * is attached to the request.
	 */
	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG;

	/*
	 * Note: we (ab)use the fact that the prp fields survive if no data
	 * is attached to the request.
	 */
	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static void abort_endio(struct request *req, blk_status_t error)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = iod->nvmeq;

	dev_warn(nvmeq->dev->ctrl.device,
		 "Abort status: 0x%x", nvme_req(req)->status);
	atomic_inc(&nvmeq->dev->ctrl.abort_limit);
	blk_mq_free_request(req);
}

static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
{

	/* If true, indicates loss of adapter communication, possibly by a
	 * NVMe Subsystem reset.
	 */
	bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);

	/* If there is a reset/reinit ongoing, we shouldn't reset again. */
	switch (dev->ctrl.state) {
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	default:
		break;
	}

	/* We shouldn't reset unless the controller is on fatal error state
	 * _or_ if we lost the communication with it.
	 */
	if (!(csts & NVME_CSTS_CFS) && !nssro)
		return false;

	/* If PCI error recovery process is happening, we cannot reset or
	 * the recovery mechanism will surely fail.
	 */
	if (pci_channel_offline(to_pci_dev(dev->dev)))
		return false;

	return true;
}

static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
{
	/* Read a config register to help see what died. */
	u16 pci_status;
	int result;

	result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
				      &pci_status);
	if (result == PCIBIOS_SUCCESSFUL)
		dev_warn(dev->ctrl.device,
			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
			 csts, pci_status);
	else
		dev_warn(dev->ctrl.device,
			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
			 csts, result);
}

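/*
 * Explanatory note (added): timeout handling below escalates in stages:
 * reset the controller if it reports a fatal or unreachable state, poll
 * for a missed completion, force a shutdown if the timeout races with
 * controller initialization, reset if the command was already aborted
 * once (or came from the admin queue), and otherwise issue an NVMe
 * Abort command and re-arm the block layer timer.
 */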
static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = iod->nvmeq;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *abort_req;
	struct nvme_command cmd;
	u32 csts = readl(dev->bar + NVME_REG_CSTS);

	/*
	 * Reset immediately if the controller is failed
	 */
	if (nvme_should_reset(dev, csts)) {
		nvme_warn_reset(dev, csts);
		nvme_dev_disable(dev, false);
		nvme_reset_ctrl(&dev->ctrl);
		return BLK_EH_HANDLED;
	}

	/*
	 * Did we miss an interrupt?
	 */
	if (__nvme_poll(nvmeq, req->tag)) {
		dev_warn(dev->ctrl.device,
			 "I/O %d QID %d timeout, completion polled\n",
			 req->tag, nvmeq->qid);
		return BLK_EH_HANDLED;
	}

	/*
	 * Shutdown immediately if controller times out while starting. The
	 * reset work will see the pci device disabled when it gets the forced
	 * cancellation error. All outstanding requests are completed on
	 * shutdown, so we return BLK_EH_HANDLED.
	 */
	if (dev->ctrl.state == NVME_CTRL_RESETTING) {
		dev_warn(dev->ctrl.device,
			 "I/O %d QID %d timeout, disable controller\n",
			 req->tag, nvmeq->qid);
		nvme_dev_disable(dev, false);
		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
		return BLK_EH_HANDLED;
	}

	/*
	 * Shutdown the controller immediately and schedule a reset if the
	 * command was already aborted once before and still hasn't been
	 * returned to the driver, or if this is the admin queue.
	 */
	if (!nvmeq->qid || iod->aborted) {
		dev_warn(dev->ctrl.device,
			 "I/O %d QID %d timeout, reset controller\n",
			 req->tag, nvmeq->qid);
		nvme_dev_disable(dev, false);
		nvme_reset_ctrl(&dev->ctrl);

		/*
		 * Mark the request as handled, since the inline shutdown
		 * forces all outstanding requests to complete.
		 */
		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
		return BLK_EH_HANDLED;
	}

	if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
		atomic_inc(&dev->ctrl.abort_limit);
		return BLK_EH_RESET_TIMER;
	}
	iod->aborted = 1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.abort.opcode = nvme_admin_abort_cmd;
	cmd.abort.cid = req->tag;
	cmd.abort.sqid = cpu_to_le16(nvmeq->qid);

	dev_warn(nvmeq->dev->ctrl.device,
		"I/O %d QID %d timeout, aborting\n",
		 req->tag, nvmeq->qid);

	abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
			BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
	if (IS_ERR(abort_req)) {
		atomic_inc(&dev->ctrl.abort_limit);
		return BLK_EH_RESET_TIMER;
	}

	abort_req->timeout = ADMIN_TIMEOUT;
	abort_req->end_io_data = NULL;
	blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);

	/*
	 * The aborted req will be completed on receiving the abort req.
	 * We enable the timer again. If hit twice, it'll cause a device reset,
	 * as the device then is in a faulty state.
	 */
	return BLK_EH_RESET_TIMER;
}

static void nvme_free_queue(struct nvme_queue *nvmeq)
{
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	if (nvmeq->sq_cmds)
		dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
}

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
	int i;

	for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
		dev->ctrl.queue_count--;
		nvme_free_queue(&dev->queues[i]);
	}
}

/**
 * nvme_suspend_queue - put queue into suspended state
 * @nvmeq - queue to suspend
 */
static int nvme_suspend_queue(struct nvme_queue *nvmeq)
{
	int vector;

	spin_lock_irq(&nvmeq->q_lock);
	if (nvmeq->cq_vector == -1) {
		spin_unlock_irq(&nvmeq->q_lock);
		return 1;
	}
	vector = nvmeq->cq_vector;
	nvmeq->dev->online_queues--;
	nvmeq->cq_vector = -1;
	spin_unlock_irq(&nvmeq->q_lock);

	if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
		blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q);

	pci_free_irq(to_pci_dev(nvmeq->dev->dev), vector, nvmeq);

	return 0;
}

static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
{
	struct nvme_queue *nvmeq = &dev->queues[0];

	if (shutdown)
		nvme_shutdown_ctrl(&dev->ctrl);
	else
		nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap);

	spin_lock_irq(&nvmeq->q_lock);
	nvme_process_cq(nvmeq);
	spin_unlock_irq(&nvmeq->q_lock);
}

static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
				int entry_size)
{
	int q_depth = dev->q_depth;
	unsigned q_size_aligned = roundup(q_depth * entry_size,
					  dev->ctrl.page_size);

	if (q_size_aligned * nr_io_queues > dev->cmb_size) {
		u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
		mem_per_q = round_down(mem_per_q, dev->ctrl.page_size);
		q_depth = div_u64(mem_per_q, entry_size);

		/*
		 * Ensure the reduced q_depth is above some threshold where it
		 * would be better to map queues in system memory with the
		 * original depth
		 */
		if (q_depth < 64)
			return -ENOMEM;
	}

	return q_depth;
}
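
/*
 * Worked example for nvme_cmb_qdepth() (hypothetical sizes): with a
 * 256 KiB CMB, 16 I/O queues, 64-byte SQ entries and a requested depth
 * of 1024, each queue would need 64 KiB (1 MiB total), so the depth is
 * reduced to 256 KiB / 16 / 64 = 256 entries per queue, which is still
 * above the 64-entry floor enforced above.
 */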

static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
				int qid, int depth)
{
	/* CMB SQEs will be mapped before creation */
	if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS))
		return 0;

	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
					    &nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		return -ENOMEM;
	return 0;
}

static int nvme_alloc_queue(struct nvme_dev *dev, int qid,
		int depth, int node)
{
	struct nvme_queue *nvmeq = &dev->queues[qid];

	if (dev->ctrl.queue_count > qid)
		return 0;

	nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
					  &nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;

	if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth))
		goto free_cqdma;

	nvmeq->q_dmadev = dev->dev;
	nvmeq->dev = dev;
	spin_lock_init(&nvmeq->q_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->q_depth = depth;
	nvmeq->qid = qid;
	nvmeq->cq_vector = -1;
	dev->ctrl.queue_count++;

	return 0;

 free_cqdma:
	dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
							nvmeq->cq_dma_addr);
 free_nvmeq:
	return -ENOMEM;
}

static int queue_request_irq(struct nvme_queue *nvmeq)
{
	struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
	int nr = nvmeq->dev->ctrl.instance;

	if (use_threaded_interrupts) {
		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check,
				nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
	} else {
		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq,
				NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
	}
}

static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
	struct nvme_dev *dev = nvmeq->dev;

	spin_lock_irq(&nvmeq->q_lock);
	nvmeq->sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
	nvme_dbbuf_init(dev, nvmeq, qid);
	dev->online_queues++;
	spin_unlock_irq(&nvmeq->q_lock);
}

static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
{
	struct nvme_dev *dev = nvmeq->dev;
	int result;

	if (dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
		unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth),
						      dev->ctrl.page_size);
		nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
		nvmeq->sq_cmds_io = dev->cmb + offset;
	}

	nvmeq->cq_vector = qid - 1;
	result = adapter_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		return result;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	nvme_init_queue(nvmeq, qid);
	result = queue_request_irq(nvmeq);
	if (result < 0)
		goto release_sq;

	return result;

 release_sq:
	adapter_delete_sq(dev, qid);
 release_cq:
	adapter_delete_cq(dev, qid);
	return result;
}

static const struct blk_mq_ops nvme_mq_admin_ops = {
	.queue_rq	= nvme_queue_rq,
	.complete	= nvme_pci_complete_rq,
	.init_hctx	= nvme_admin_init_hctx,
	.exit_hctx      = nvme_admin_exit_hctx,
	.init_request	= nvme_init_request,
	.timeout	= nvme_timeout,
};

static const struct blk_mq_ops nvme_mq_ops = {
	.queue_rq	= nvme_queue_rq,
	.complete	= nvme_pci_complete_rq,
	.init_hctx	= nvme_init_hctx,
	.init_request	= nvme_init_request,
	.map_queues	= nvme_pci_map_queues,
	.timeout	= nvme_timeout,
	.poll		= nvme_poll,
};

static void nvme_dev_remove_admin(struct nvme_dev *dev)
{
	if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
		/*
		 * If the controller was reset during removal, it's possible
		 * user requests may be waiting on a stopped queue. Start the
		 * queue to flush these to completion.
		 */
		blk_mq_unquiesce_queue(dev->ctrl.admin_q);
		blk_cleanup_queue(dev->ctrl.admin_q);
		blk_mq_free_tag_set(&dev->admin_tagset);
	}
}

static int nvme_alloc_admin_tags(struct nvme_dev *dev)
{
	if (!dev->ctrl.admin_q) {
		dev->admin_tagset.ops = &nvme_mq_admin_ops;
		dev->admin_tagset.nr_hw_queues = 1;

		dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
		dev->admin_tagset.timeout = ADMIN_TIMEOUT;
		dev->admin_tagset.numa_node = dev_to_node(dev->dev);
		dev->admin_tagset.cmd_size = nvme_pci_cmd_size(dev, false);
		dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
		dev->admin_tagset.driver_data = dev;

		if (blk_mq_alloc_tag_set(&dev->admin_tagset))
			return -ENOMEM;
		dev->ctrl.admin_tagset = &dev->admin_tagset;

		dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
		if (IS_ERR(dev->ctrl.admin_q)) {
			blk_mq_free_tag_set(&dev->admin_tagset);
			return -ENOMEM;
		}
		if (!blk_get_queue(dev->ctrl.admin_q)) {
			nvme_dev_remove_admin(dev);
			dev->ctrl.admin_q = NULL;
			return -ENODEV;
		}
	} else
		blk_mq_unquiesce_queue(dev->ctrl.admin_q);

	return 0;
}

static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
{
	return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride);
}

static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (size <= dev->bar_mapped_size)
		return 0;
	if (size > pci_resource_len(pdev, 0))
		return -ENOMEM;
	if (dev->bar)
		iounmap(dev->bar);
	dev->bar = ioremap(pci_resource_start(pdev, 0), size);
	if (!dev->bar) {
		dev->bar_mapped_size = 0;
		return -ENOMEM;
	}
	dev->bar_mapped_size = size;
	dev->dbs = dev->bar + NVME_REG_DBS;

	return 0;
}

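/*
 * Explanatory note (added): unlike I/O queues, the admin queue is not
 * created with Create SQ/CQ commands.  Its depth is programmed into the
 * AQA register and the queue base addresses are written directly to the
 * ASQ/ACQ BAR registers below, before the controller is re-enabled.
 */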
static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	struct nvme_queue *nvmeq;

	result = nvme_remap_bar(dev, db_bar_size(dev, 0));
	if (result < 0)
		return result;

	dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
				NVME_CAP_NSSRC(dev->ctrl.cap) : 0;

	if (dev->subsystem &&
	    (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO))
		writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS);

	result = nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap);
	if (result < 0)
		return result;

	result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
			dev_to_node(dev->dev));
	if (result)
		return result;

	nvmeq = &dev->queues[0];
	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	writel(aqa, dev->bar + NVME_REG_AQA);
	lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
	lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);

	result = nvme_enable_ctrl(&dev->ctrl, dev->ctrl.cap);
	if (result)
		return result;

	nvmeq->cq_vector = 0;
	nvme_init_queue(nvmeq, 0);
	result = queue_request_irq(nvmeq);
	if (result) {
		nvmeq->cq_vector = -1;
		return result;
	}

	return result;
}

static int nvme_create_io_queues(struct nvme_dev *dev)
{
	unsigned i, max;
	int ret = 0;

	for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
		/* vector == qid - 1, match nvme_create_queue */
		if (nvme_alloc_queue(dev, i, dev->q_depth,
		     pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) {
			ret = -ENOMEM;
			break;
		}
	}

	max = min(dev->max_qid, dev->ctrl.queue_count - 1);
	for (i = dev->online_queues; i <= max; i++) {
		ret = nvme_create_queue(&dev->queues[i], i);
		if (ret)
			break;
	}

	/*
	 * Ignore failing Create SQ/CQ commands, we can continue with less
	 * than the desired amount of queues, and even a controller without
	 * I/O queues can still be used to issue admin commands.  This might
	 * be useful to upgrade a buggy firmware for example.
	 */
	return ret >= 0 ? 0 : ret;
}

static ssize_t nvme_cmb_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));

	return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz  : x%08x\n",
		       ndev->cmbloc, ndev->cmbsz);
}
static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);

static u64 nvme_cmb_size_unit(struct nvme_dev *dev)
{
	u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;

	return 1ULL << (12 + 4 * szu);
}

static u32 nvme_cmb_size(struct nvme_dev *dev)
{
	return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK;
}

static void nvme_map_cmb(struct nvme_dev *dev)
{
	u64 size, offset;
	resource_size_t bar_size;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	int bar;

	dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
	if (!dev->cmbsz)
		return;
	dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);

	if (!use_cmb_sqes)
		return;

	size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev);
	offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc);
	bar = NVME_CMB_BIR(dev->cmbloc);
	bar_size = pci_resource_len(pdev, bar);

	if (offset > bar_size)
		return;

	/*
	 * Controllers may support a CMB size larger than their BAR,
	 * for example, due to being behind a bridge. Reduce the CMB to
	 * the reported size of the BAR.
	 */
	if (size > bar_size - offset)
		size = bar_size - offset;

	dev->cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
	if (!dev->cmb)
		return;
	dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset;
	dev->cmb_size = size;

	if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
				    &dev_attr_cmb.attr, NULL))
		dev_warn(dev->ctrl.device,
			 "failed to add sysfs attribute for CMB\n");
}

static inline void nvme_release_cmb(struct nvme_dev *dev)
{
	if (dev->cmb) {
		iounmap(dev->cmb);
		dev->cmb = NULL;
		sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
					     &dev_attr_cmb.attr, NULL);
		dev->cmbsz = 0;
	}
}

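/*
 * Enable or disable the host memory buffer via Set Features: the buffer size
 * (in controller pages), the DMA address of the descriptor list and the
 * number of descriptors are passed in dwords 12-15.
 */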
static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
{
	u64 dma_addr = dev->host_mem_descs_dma;
	struct nvme_command c;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode	= nvme_admin_set_features;
	c.features.fid		= cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
	c.features.dword11	= cpu_to_le32(bits);
	c.features.dword12	= cpu_to_le32(dev->host_mem_size >>
					      ilog2(dev->ctrl.page_size));
	c.features.dword13	= cpu_to_le32(lower_32_bits(dma_addr));
	c.features.dword14	= cpu_to_le32(upper_32_bits(dma_addr));
	c.features.dword15	= cpu_to_le32(dev->nr_host_mem_descs);

	ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
	if (ret) {
		dev_warn(dev->ctrl.device,
			 "failed to set host mem (err %d, flags %#x).\n",
			 ret, bits);
	}
	return ret;
}

static void nvme_free_host_mem(struct nvme_dev *dev)
{
	int i;

	for (i = 0; i < dev->nr_host_mem_descs; i++) {
		struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
		size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;

		dma_free_coherent(dev->dev, size, dev->host_mem_desc_bufs[i],
				le64_to_cpu(desc->addr));
	}

	kfree(dev->host_mem_desc_bufs);
	dev->host_mem_desc_bufs = NULL;
	dma_free_coherent(dev->dev,
			dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
			dev->host_mem_descs, dev->host_mem_descs_dma);
	dev->host_mem_descs = NULL;
	dev->nr_host_mem_descs = 0;
}

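/*
 * Allocate the host memory buffer as a series of DMA chunks of chunk_size
 * bytes, bounded by the controller's descriptor limit, and record each chunk
 * in a host memory descriptor.  A partial allocation is kept as long as at
 * least one chunk could be allocated.
 */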
static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
		u32 chunk_size)
{
	struct nvme_host_mem_buf_desc *descs;
	u32 max_entries, len;
	dma_addr_t descs_dma;
	int i = 0;
	void **bufs;
	u64 size, tmp;

	tmp = (preferred + chunk_size - 1);
	do_div(tmp, chunk_size);
	max_entries = tmp;

	if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
		max_entries = dev->ctrl.hmmaxd;

	descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs),
			&descs_dma, GFP_KERNEL);
	if (!descs)
		goto out;

	bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL);
	if (!bufs)
		goto out_free_descs;

	for (size = 0; size < preferred && i < max_entries; size += len) {
		dma_addr_t dma_addr;

		len = min_t(u64, chunk_size, preferred - size);
		bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL,
				DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
		if (!bufs[i])
			break;

		descs[i].addr = cpu_to_le64(dma_addr);
		descs[i].size = cpu_to_le32(len / dev->ctrl.page_size);
		i++;
	}

	if (!size)
		goto out_free_bufs;

	dev->nr_host_mem_descs = i;
	dev->host_mem_size = size;
	dev->host_mem_descs = descs;
	dev->host_mem_descs_dma = descs_dma;
	dev->host_mem_desc_bufs = bufs;
	return 0;

out_free_bufs:
	while (--i >= 0) {
		size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;

		dma_free_coherent(dev->dev, size, bufs[i],
				le64_to_cpu(descs[i].addr));
	}

	kfree(bufs);
out_free_descs:
	dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
			descs_dma);
out:
	dev->host_mem_descs = NULL;
	return -ENOMEM;
}

static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
{
	u32 chunk_size;

	/* start big and work our way down */
	for (chunk_size = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
	     chunk_size >= max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
	     chunk_size /= 2) {
		if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) {
			if (!min || dev->host_mem_size >= min)
				return 0;
			nvme_free_host_mem(dev);
		}
	}

	return -ENOMEM;
}

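/*
 * Set up the host memory buffer: clamp the controller's preferred size to
 * the max_host_mem_size_mb module parameter, reuse or (re)allocate the
 * buffer as needed, and hand it to the controller with Set Features.
 */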
static int nvme_setup_host_mem(struct nvme_dev *dev)
{
	u64 max = (u64)max_host_mem_size_mb * SZ_1M;
	u64 preferred = (u64)dev->ctrl.hmpre * 4096;
	u64 min = (u64)dev->ctrl.hmmin * 4096;
	u32 enable_bits = NVME_HOST_MEM_ENABLE;
	int ret;

	preferred = min(preferred, max);
	if (min > max) {
		dev_warn(dev->ctrl.device,
			"min host memory (%lld MiB) above limit (%d MiB).\n",
			min >> ilog2(SZ_1M), max_host_mem_size_mb);
		nvme_free_host_mem(dev);
		return 0;
	}

	/*
	 * If we already have a buffer allocated, check if we can reuse it.
	 */
	if (dev->host_mem_descs) {
		if (dev->host_mem_size >= min)
			enable_bits |= NVME_HOST_MEM_RETURN;
		else
			nvme_free_host_mem(dev);
	}

	if (!dev->host_mem_descs) {
		if (nvme_alloc_host_mem(dev, min, preferred)) {
			dev_warn(dev->ctrl.device,
				"failed to allocate host memory buffer.\n");
			return 0; /* controller must work without HMB */
		}

		dev_info(dev->ctrl.device,
			"allocated %lld MiB host memory buffer.\n",
			dev->host_mem_size >> ilog2(SZ_1M));
	}

	ret = nvme_set_host_mem(dev, enable_bits);
	if (ret)
		nvme_free_host_mem(dev);
	return ret;
}

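/*
 * Negotiate the number of I/O queues with the controller, resize the
 * doorbell BAR mapping to cover them, allocate one interrupt vector per
 * queue with CPU affinity, and finally create the queues themselves.
 */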
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
	struct nvme_queue *adminq = &dev->queues[0];
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	int result, nr_io_queues;
	unsigned long size;

	nr_io_queues = num_present_cpus();
	result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
	if (result < 0)
		return result;

	if (nr_io_queues == 0)
		return 0;

	if (dev->cmb && (dev->cmbsz & NVME_CMBSZ_SQS)) {
		result = nvme_cmb_qdepth(dev, nr_io_queues,
				sizeof(struct nvme_command));
		if (result > 0)
			dev->q_depth = result;
		else
			nvme_release_cmb(dev);
	}

	do {
		size = db_bar_size(dev, nr_io_queues);
		result = nvme_remap_bar(dev, size);
		if (!result)
			break;
		if (!--nr_io_queues)
			return -ENOMEM;
	} while (1);
	adminq->q_db = dev->dbs;

	/* Deregister the admin queue's interrupt */
	pci_free_irq(pdev, 0, adminq);

	/*
	 * If we enabled MSI-X or MSI early because INTx is not usable,
	 * disable it again before setting up the full range of vectors
	 * we need.
	 */
	pci_free_irq_vectors(pdev);
	nr_io_queues = pci_alloc_irq_vectors(pdev, 1, nr_io_queues,
			PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY);
	if (nr_io_queues <= 0)
		return -EIO;
	dev->max_qid = nr_io_queues;

	/*
	 * Should investigate if there's a performance win from allocating
	 * more queues than interrupt vectors; it might allow the submission
	 * path to scale better, even if the receive path is limited by the
	 * number of interrupts.
	 */

	result = queue_request_irq(adminq);
	if (result) {
		adminq->cq_vector = -1;
		return result;
	}
	return nvme_create_io_queues(dev);
}

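/*
 * Completion handlers for the asynchronous Delete I/O SQ/CQ commands: each
 * completion signals dev->ioq_wait so that nvme_disable_io_queues() can
 * track how many deletions are still outstanding.
 */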
static void nvme_del_queue_end(struct request *req, blk_status_t error)
{
	struct nvme_queue *nvmeq = req->end_io_data;

	blk_mq_free_request(req);
	complete(&nvmeq->dev->ioq_wait);
}

static void nvme_del_cq_end(struct request *req, blk_status_t error)
{
	struct nvme_queue *nvmeq = req->end_io_data;

	if (!error) {
		unsigned long flags;

		/*
		 * We might be called with the AQ q_lock held
		 * and the I/O queue q_lock should always
		 * nest inside the AQ one.
		 */
		spin_lock_irqsave_nested(&nvmeq->q_lock, flags,
					SINGLE_DEPTH_NESTING);
		nvme_process_cq(nvmeq);
		spin_unlock_irqrestore(&nvmeq->q_lock, flags);
	}

	nvme_del_queue_end(req, error);
}

static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
{
	struct request_queue *q = nvmeq->dev->ctrl.admin_q;
	struct request *req;
	struct nvme_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.delete_queue.opcode = opcode;
	cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);

	req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = ADMIN_TIMEOUT;
	req->end_io_data = nvmeq;

	blk_execute_rq_nowait(q, NULL, req, false,
			opcode == nvme_admin_delete_cq ?
				nvme_del_cq_end : nvme_del_queue_end);
	return 0;
}

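/*
 * Tear down the I/O queues in two passes: first delete all submission
 * queues, then all completion queues, waiting for each batch of asynchronous
 * delete commands to complete.
 */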
static void nvme_disable_io_queues(struct nvme_dev *dev)
{
	int pass, queues = dev->online_queues - 1;
	unsigned long timeout;
	u8 opcode = nvme_admin_delete_sq;

	for (pass = 0; pass < 2; pass++) {
		int sent = 0, i = queues;

		reinit_completion(&dev->ioq_wait);
 retry:
		timeout = ADMIN_TIMEOUT;
		for (; i > 0; i--, sent++)
			if (nvme_delete_queue(&dev->queues[i], opcode))
				break;

		while (sent--) {
			timeout = wait_for_completion_io_timeout(&dev->ioq_wait, timeout);
			if (timeout == 0)
				return;
			if (i)
				goto retry;
		}
		opcode = nvme_admin_delete_cq;
	}
}

/*
 * Return an error value only when the tagset allocation failed.
 */
static int nvme_dev_add(struct nvme_dev *dev)
{
	int ret;

	if (!dev->ctrl.tagset) {
		dev->tagset.ops = &nvme_mq_ops;
		dev->tagset.nr_hw_queues = dev->online_queues - 1;
		dev->tagset.timeout = NVME_IO_TIMEOUT;
		dev->tagset.numa_node = dev_to_node(dev->dev);
		dev->tagset.queue_depth =
				min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
		dev->tagset.cmd_size = nvme_pci_cmd_size(dev, false);
		if ((dev->ctrl.sgls & ((1 << 0) | (1 << 1))) && sgl_threshold) {
			dev->tagset.cmd_size = max(dev->tagset.cmd_size,
					nvme_pci_cmd_size(dev, true));
		}
		dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
		dev->tagset.driver_data = dev;

		ret = blk_mq_alloc_tag_set(&dev->tagset);
		if (ret) {
			dev_warn(dev->ctrl.device,
				"IO queues tagset allocation failed %d\n", ret);
			return ret;
		}
		dev->ctrl.tagset = &dev->tagset;

		nvme_dbbuf_set(dev);
	} else {
		blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);

		/* Free previously allocated queues that are no longer usable */
		nvme_free_queues(dev, dev->online_queues);
	}

	return 0;
}

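/*
 * Bring up the PCI side of the controller: enable the device and bus
 * mastering, set the DMA mask, pre-allocate a single interrupt vector for
 * the admin queue, read CAP to size the queues, and apply device-specific
 * quirks.
 */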
static int nvme_pci_enable(struct nvme_dev *dev)
{
	int result = -ENOMEM;
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pci_enable_device_mem(pdev))
		return result;

	pci_set_master(pdev);

	if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32)))
		goto disable;

	if (readl(dev->bar + NVME_REG_CSTS) == -1) {
		result = -ENODEV;
		goto disable;
	}

	/*
	 * Some devices and/or platforms don't advertise or work with INTx
	 * interrupts. Pre-enable a single MSI-X or MSI vector for setup.
	 * We'll adjust this later.
	 */
	result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (result < 0)
		return result;

	dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);

	dev->q_depth = min_t(int, NVME_CAP_MQES(dev->ctrl.cap) + 1,
				io_queue_depth);
	dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
	dev->dbs = dev->bar + 4096;

	/*
	 * Temporary fix for the Apple controller found in the MacBook8,1 and
	 * some MacBook7,1 to avoid controller resets and data loss.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
		dev->q_depth = 2;
		dev_warn(dev->ctrl.device, "detected Apple NVMe controller, "
			"set queue depth=%u to work around controller resets\n",
			dev->q_depth);
	} else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG &&
		   (pdev->device == 0xa821 || pdev->device == 0xa822) &&
		   NVME_CAP_MQES(dev->ctrl.cap) == 0) {
		dev->q_depth = 64;
		dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, "
                        "set queue depth=%u\n", dev->q_depth);
	}

	nvme_map_cmb(dev);

	pci_enable_pcie_error_reporting(pdev);
	pci_save_state(pdev);
	return 0;

 disable:
	pci_disable_device(pdev);
	return result;
}

static void nvme_dev_unmap(struct nvme_dev *dev)
{
	if (dev->bar)
		iounmap(dev->bar);
	pci_release_mem_regions(to_pci_dev(dev->dev));
}

static void nvme_pci_disable(struct nvme_dev *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	nvme_release_cmb(dev);
	pci_free_irq_vectors(pdev);

	if (pci_is_enabled(pdev)) {
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
	}
}

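/*
 * Shut down (or prepare to reset) the controller: freeze and drain I/O,
 * delete the queues, disable the PCI device, and cancel any requests that
 * are still outstanding.  dev->shutdown_lock serializes this teardown
 * against concurrent resets.
 */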
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
{
	int i;
	bool dead = true;
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	mutex_lock(&dev->shutdown_lock);
	if (pci_is_enabled(pdev)) {
		u32 csts = readl(dev->bar + NVME_REG_CSTS);

		if (dev->ctrl.state == NVME_CTRL_LIVE ||
		    dev->ctrl.state == NVME_CTRL_RESETTING)
			nvme_start_freeze(&dev->ctrl);
		dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) ||
			pdev->error_state != pci_channel_io_normal);
	}

	/*
	 * Give the controller a chance to complete all entered requests if
	 * doing a safe shutdown.
	 */
	if (!dead) {
		if (shutdown)
			nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);

		/*
		 * If the controller is still alive, tell it to stop using the
		 * host memory buffer.  In theory the shutdown / reset should
		 * make sure that it doesn't access the host memory anymore,
		 * but I'd rather be safe than sorry.
		 */
		if (dev->host_mem_descs)
			nvme_set_host_mem(dev, 0);

	}
	nvme_stop_queues(&dev->ctrl);

	if (!dead) {
		nvme_disable_io_queues(dev);
		nvme_disable_admin_queue(dev, shutdown);
	}
	for (i = dev->ctrl.queue_count - 1; i >= 0; i--)
		nvme_suspend_queue(&dev->queues[i]);

	nvme_pci_disable(dev);

	blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
	blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);

	/*
	 * The driver will not be starting up queues again if shutting down so
	 * must flush all entered requests to their failed completion to avoid
	 * deadlocking blk-mq hot-cpu notifier.
	 */
	if (shutdown)
		nvme_start_queues(&dev->ctrl);
	mutex_unlock(&dev->shutdown_lock);
}

static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
	dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
						PAGE_SIZE, PAGE_SIZE, 0);
	if (!dev->prp_page_pool)
		return -ENOMEM;

	/* Optimisation for I/Os between 4k and 128k */
	dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
						256, 256, 0);
	if (!dev->prp_small_pool) {
		dma_pool_destroy(dev->prp_page_pool);
		return -ENOMEM;
	}
	return 0;
}

static void nvme_release_prp_pools(struct nvme_dev *dev)
{
	dma_pool_destroy(dev->prp_page_pool);
	dma_pool_destroy(dev->prp_small_pool);
}

static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
{
	struct nvme_dev *dev = to_nvme_dev(ctrl);

	nvme_dbbuf_dma_free(dev);
	put_device(dev->dev);
	if (dev->tagset.tags)
		blk_mq_free_tag_set(&dev->tagset);
	if (dev->ctrl.admin_q)
		blk_put_queue(dev->ctrl.admin_q);
	kfree(dev->queues);
	free_opal_dev(dev->ctrl.opal_dev);
	kfree(dev);
}

static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
{
	dev_warn(dev->ctrl.device, "Removing after probe failure status: %d\n", status);

	nvme_get_ctrl(&dev->ctrl);
	nvme_dev_disable(dev, false);
	if (!queue_work(nvme_wq, &dev->remove_work))
		nvme_put_ctrl(&dev->ctrl);
}

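/*
 * Controller (re)initialization work, run from nvme_wq with the controller
 * in the RESETTING state: PCI enable, admin queue setup, identify, optional
 * OPAL/dbbuf/HMB setup and I/O queue creation.  Any failure hands the device
 * to nvme_remove_dead_ctrl().
 */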
static void nvme_reset_work(struct work_struct *work)
{
	struct nvme_dev *dev =
		container_of(work, struct nvme_dev, ctrl.reset_work);
	bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
	int result = -ENODEV;
	enum nvme_ctrl_state new_state = NVME_CTRL_LIVE;

	if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
		goto out;

	/*
	 * If we're called to reset a live controller, first shut it down
	 * before moving on.
	 */
	if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
		nvme_dev_disable(dev, false);

	/*
	 * Mark the controller as CONNECTING (a state introduced by the
	 * nvme-fc/rdma transports) while the initializing procedure runs.
	 */
	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
		dev_warn(dev->ctrl.device,
			"failed to mark controller CONNECTING\n");
		goto out;
	}

	result = nvme_pci_enable(dev);
	if (result)
		goto out;

	result = nvme_pci_configure_admin_queue(dev);
	if (result)
		goto out;

	result = nvme_alloc_admin_tags(dev);
	if (result)
		goto out;

	result = nvme_init_identify(&dev->ctrl);
	if (result)
		goto out;

	if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) {
		if (!dev->ctrl.opal_dev)
			dev->ctrl.opal_dev =
				init_opal_dev(&dev->ctrl, &nvme_sec_submit);
		else if (was_suspend)
			opal_unlock_from_suspend(dev->ctrl.opal_dev);
	} else {
		free_opal_dev(dev->ctrl.opal_dev);
		dev->ctrl.opal_dev = NULL;
	}

	if (dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP) {
		result = nvme_dbbuf_dma_alloc(dev);
		if (result)
			dev_warn(dev->dev,
				 "unable to allocate dma for dbbuf\n");
	}

	if (dev->ctrl.hmpre) {
		result = nvme_setup_host_mem(dev);
		if (result < 0)
			goto out;
	}

	result = nvme_setup_io_queues(dev);
	if (result)
		goto out;

	/*
	 * Keep the controller around but remove all namespaces if we don't
	 * have any working I/O queue.
	 */
	if (dev->online_queues < 2) {
		dev_warn(dev->ctrl.device, "IO queues not created\n");
		nvme_kill_queues(&dev->ctrl);
		nvme_remove_namespaces(&dev->ctrl);
		new_state = NVME_CTRL_ADMIN_ONLY;
	} else {
		nvme_start_queues(&dev->ctrl);
		nvme_wait_freeze(&dev->ctrl);
		/* hit this only when the tagset allocation fails */
		if (nvme_dev_add(dev))
			new_state = NVME_CTRL_ADMIN_ONLY;
		nvme_unfreeze(&dev->ctrl);
	}

	/*
	 * If only the admin queue is live, keep it around for further
	 * investigation or recovery.
	 */
	if (!nvme_change_ctrl_state(&dev->ctrl, new_state)) {
		dev_warn(dev->ctrl.device,
			"failed to mark controller state %d\n", new_state);
		goto out;
	}

	nvme_start_ctrl(&dev->ctrl);
	return;

 out:
	nvme_remove_dead_ctrl(dev, result);
}

static void nvme_remove_dead_ctrl_work(struct work_struct *work)
{
	struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	nvme_kill_queues(&dev->ctrl);
	if (pci_get_drvdata(pdev))
		device_release_driver(&pdev->dev);
	nvme_put_ctrl(&dev->ctrl);
}

static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
	*val = readl(to_nvme_dev(ctrl)->bar + off);
	return 0;
}

static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{
	writel(val, to_nvme_dev(ctrl)->bar + off);
	return 0;
}

static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
	*val = readq(to_nvme_dev(ctrl)->bar + off);
	return 0;
}

static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
	.name			= "pcie",
	.module			= THIS_MODULE,
	.flags			= NVME_F_METADATA_SUPPORTED,
	.reg_read32		= nvme_pci_reg_read32,
	.reg_write32		= nvme_pci_reg_write32,
	.reg_read64		= nvme_pci_reg_read64,
	.free_ctrl		= nvme_pci_free_ctrl,
	.submit_async_event	= nvme_pci_submit_async_event,
};

static int nvme_dev_map(struct nvme_dev *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pci_request_mem_regions(pdev, "nvme"))
		return -ENODEV;

	if (nvme_remap_bar(dev, NVME_REG_DBS + 4096))
		goto release;

	return 0;
  release:
	pci_release_mem_regions(pdev);
	return -ENODEV;
}

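/*
 * Some vendor/platform combinations are known to misbehave with certain
 * power-saving features; detect them via DMI and return the quirks to apply.
 */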
static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
{
	if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
		/*
		 * Several Samsung devices seem to drop off the PCIe bus
		 * randomly when APST is on and uses the deepest sleep state.
		 * This has been observed on a Samsung "SM951 NVMe SAMSUNG
		 * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD
		 * 950 PRO 256GB", but it seems to be restricted to two Dell
		 * laptops.
		 */
		if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") &&
		    (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
		     dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
			return NVME_QUIRK_NO_DEEPEST_PS;
	} else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
		/*
		 * Samsung SSD 960 EVO drops off the PCIe bus after system
		 * suspend on a Ryzen board, ASUS PRIME B350M-A.
		 */
		if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
		    dmi_match(DMI_BOARD_NAME, "PRIME B350M-A"))
			return NVME_QUIRK_NO_APST;
	}

	return 0;
}

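/*
 * PCI probe: allocate the per-controller state close to the device's NUMA
 * node, map the BAR, register with the NVMe core, and schedule the initial
 * reset to bring the controller up asynchronously.
 */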
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int node, result = -ENOMEM;
	struct nvme_dev *dev;
	unsigned long quirks = id->driver_data;

	node = dev_to_node(&pdev->dev);
	if (node == NUMA_NO_NODE)
		set_dev_node(&pdev->dev, first_memory_node);

	dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
	if (!dev)
		return -ENOMEM;

	dev->queues = kcalloc_node(num_possible_cpus() + 1,
			sizeof(struct nvme_queue), GFP_KERNEL, node);
	if (!dev->queues)
		goto free;

	dev->dev = get_device(&pdev->dev);
	pci_set_drvdata(pdev, dev);

	result = nvme_dev_map(dev);
	if (result)
		goto put_pci;

	INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
	INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
	mutex_init(&dev->shutdown_lock);
	init_completion(&dev->ioq_wait);

	result = nvme_setup_prp_pools(dev);
	if (result)
		goto unmap;

	quirks |= check_vendor_combination_bug(pdev);

	result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
			quirks);
	if (result)
		goto release_pools;

	dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));

	nvme_reset_ctrl(&dev->ctrl);

	return 0;

 release_pools:
	nvme_release_prp_pools(dev);
 unmap:
	nvme_dev_unmap(dev);
 put_pci:
	put_device(dev->dev);
 free:
	kfree(dev->queues);
	kfree(dev);
	return result;
}

static void nvme_reset_prepare(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	nvme_dev_disable(dev, false);
}

static void nvme_reset_done(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	nvme_reset_ctrl_sync(&dev->ctrl);
}

static void nvme_shutdown(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	nvme_dev_disable(dev, true);
}

/*
 * The driver's remove may be called on a device in a partially initialized
 * state. This function must not have any dependencies on the device state in
 * order to proceed.
 */
static void nvme_remove(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);

	cancel_work_sync(&dev->ctrl.reset_work);
	pci_set_drvdata(pdev, NULL);

	if (!pci_device_is_present(pdev)) {
		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
		nvme_dev_disable(dev, false);
	}

	flush_work(&dev->ctrl.reset_work);
	nvme_stop_ctrl(&dev->ctrl);
	nvme_remove_namespaces(&dev->ctrl);
	nvme_dev_disable(dev, true);
	nvme_free_host_mem(dev);
	nvme_dev_remove_admin(dev);
	nvme_free_queues(dev, 0);
	nvme_uninit_ctrl(&dev->ctrl);
	nvme_release_prp_pools(dev);
	nvme_dev_unmap(dev);
	nvme_put_ctrl(&dev->ctrl);
}

static int nvme_pci_sriov_configure(struct pci_dev *pdev, int numvfs)
{
	int ret = 0;

	if (numvfs == 0) {
		if (pci_vfs_assigned(pdev)) {
			dev_warn(&pdev->dev,
				"Cannot disable SR-IOV VFs while assigned\n");
			return -EPERM;
		}
		pci_disable_sriov(pdev);
		return 0;
	}

	ret = pci_enable_sriov(pdev, numvfs);
	return ret ? ret : numvfs;
}

#ifdef CONFIG_PM_SLEEP
static int nvme_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct nvme_dev *ndev = pci_get_drvdata(pdev);

	nvme_dev_disable(ndev, true);
	return 0;
}

static int nvme_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct nvme_dev *ndev = pci_get_drvdata(pdev);

	nvme_reset_ctrl(&ndev->ctrl);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);

static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	/*
	 * A frozen channel requires a reset. When detected, this method will
	 * shut down the controller to quiesce. The controller will be
	 * restarted after the slot reset through the driver's slot_reset
	 * callback.
	 */
	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		dev_warn(dev->ctrl.device,
			"frozen state error detected, reset controller\n");
		nvme_dev_disable(dev, false);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_warn(dev->ctrl.device,
			"failure state error detected, request disconnect\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	dev_info(dev->ctrl.device, "restart after slot reset\n");
	pci_restore_state(pdev);
	nvme_reset_ctrl(&dev->ctrl);
	return PCI_ERS_RESULT_RECOVERED;
}

static void nvme_error_resume(struct pci_dev *pdev)
{
	pci_cleanup_aer_uncorrect_error_status(pdev);
}

static const struct pci_error_handlers nvme_err_handler = {
	.error_detected	= nvme_error_detected,
	.slot_reset	= nvme_slot_reset,
	.resume		= nvme_error_resume,
	.reset_prepare	= nvme_reset_prepare,
	.reset_done	= nvme_reset_done,
};

static const struct pci_device_id nvme_id_table[] = {
	{ PCI_VDEVICE(INTEL, 0x0953),
		.driver_data = NVME_QUIRK_STRIPE_SIZE |
				NVME_QUIRK_DEALLOCATE_ZEROES, },
	{ PCI_VDEVICE(INTEL, 0x0a53),
		.driver_data = NVME_QUIRK_STRIPE_SIZE |
				NVME_QUIRK_DEALLOCATE_ZEROES, },
	{ PCI_VDEVICE(INTEL, 0x0a54),
		.driver_data = NVME_QUIRK_STRIPE_SIZE |
				NVME_QUIRK_DEALLOCATE_ZEROES, },
	{ PCI_VDEVICE(INTEL, 0x0a55),
		.driver_data = NVME_QUIRK_STRIPE_SIZE |
				NVME_QUIRK_DEALLOCATE_ZEROES, },
	{ PCI_VDEVICE(INTEL, 0xf1a5),	/* Intel 600P/P3100 */
		.driver_data = NVME_QUIRK_NO_DEEPEST_PS },
	{ PCI_VDEVICE(INTEL, 0x5845),	/* Qemu emulated controller */
		.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
	{ PCI_DEVICE(0x1c58, 0x0003),	/* HGST adapter */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x1c58, 0x0023),	/* WDC SN200 adapter */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x1c5f, 0x0540),	/* Memblaze Pblaze4 adapter */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x144d, 0xa821),   /* Samsung PM1725 */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x144d, 0xa822),   /* Samsung PM1725a */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x1d1d, 0x1f1f),	/* LightNVM qemu device */
		.driver_data = NVME_QUIRK_LIGHTNVM, },
	{ PCI_DEVICE(0x1d1d, 0x2807),	/* CNEX WL */
		.driver_data = NVME_QUIRK_LIGHTNVM, },
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);

static struct pci_driver nvme_driver = {
	.name		= "nvme",
	.id_table	= nvme_id_table,
	.probe		= nvme_probe,
	.remove		= nvme_remove,
	.shutdown	= nvme_shutdown,
	.driver		= {
		.pm	= &nvme_dev_pm_ops,
	},
	.sriov_configure = nvme_pci_sriov_configure,
	.err_handler	= &nvme_err_handler,
};

static int __init nvme_init(void)
{
	return pci_register_driver(&nvme_driver);
}

static void __exit nvme_exit(void)
{
	pci_unregister_driver(&nvme_driver);
	flush_workqueue(nvme_wq);
	_nvme_check_size();
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_init);
module_exit(nvme_exit);