/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/aer.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/once.h>
#include <linux/pci.h>
#include <linux/t10-pi.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/sed-opal.h>

#include "nvme.h"

#define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))

#define SGES_PER_PAGE	(PAGE_SIZE / sizeof(struct nvme_sgl_desc))

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static bool use_cmb_sqes = true;
module_param(use_cmb_sqes, bool, 0644);
MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");

static unsigned int max_host_mem_size_mb = 128;
module_param(max_host_mem_size_mb, uint, 0444);
MODULE_PARM_DESC(max_host_mem_size_mb,
	"Maximum Host Memory Buffer (HMB) size per controller (in MiB)");

static unsigned int sgl_threshold = SZ_32K;
module_param(sgl_threshold, uint, 0644);
MODULE_PARM_DESC(sgl_threshold,
		"Use SGLs when average request segment size is larger or equal to "
		"this size. Use 0 to disable SGLs.");

static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops io_queue_depth_ops = {
	.set = io_queue_depth_set,
	.get = param_get_int,
};

static int io_queue_depth = 1024;
module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2");

struct nvme_dev;
struct nvme_queue;

static void nvme_process_cq(struct nvme_queue *nvmeq);
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct nvme_queue *queues;
	struct blk_mq_tag_set tagset;
	struct blk_mq_tag_set admin_tagset;
	u32 __iomem *dbs;
	struct device *dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	unsigned online_queues;
	unsigned max_qid;
	int q_depth;
	u32 db_stride;
	void __iomem *bar;
	unsigned long bar_mapped_size;
	struct work_struct remove_work;
	struct mutex shutdown_lock;
	bool subsystem;
	void __iomem *cmb;
	pci_bus_addr_t cmb_bus_addr;
	u64 cmb_size;
	u32 cmbsz;
	u32 cmbloc;
	struct nvme_ctrl ctrl;
	struct completion ioq_wait;

	/* shadow doorbell buffer support: */
	u32 *dbbuf_dbs;
	dma_addr_t dbbuf_dbs_dma_addr;
	u32 *dbbuf_eis;
	dma_addr_t dbbuf_eis_dma_addr;

	/* host memory buffer support: */
	u64 host_mem_size;
	u32 nr_host_mem_descs;
	dma_addr_t host_mem_descs_dma;
	struct nvme_host_mem_buf_desc *host_mem_descs;
	void **host_mem_desc_bufs;
};

static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
{
	int n = 0, ret;

	ret = kstrtoint(val, 10, &n);
	if (ret != 0 || n < 2)
		return -EINVAL;

	return param_set_int(val, kp);
}

static inline unsigned int sq_idx(unsigned int qid, u32 stride)
{
	return qid * 2 * stride;
}

static inline unsigned int cq_idx(unsigned int qid, u32 stride)
{
	return (qid * 2 + 1) * stride;
}
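
/*
 * sq_idx()/cq_idx() above index into the shadow doorbell buffer, which keeps
 * one 32-bit entry per hardware doorbell, laid out like the doorbell
 * registers themselves: for each queue pair the submission-queue slot comes
 * first and the completion-queue slot second, scaled by the controller's
 * doorbell stride.  For example, with a stride of 1, queue 3 uses dbbuf
 * entries 6 (SQ tail) and 7 (CQ head).
 */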

static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_dev, ctrl);
}

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct device *q_dmadev;
	struct nvme_dev *dev;
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	struct nvme_command __iomem *sq_cmds_io;
	volatile struct nvme_completion *cqes;
	struct blk_mq_tags **tags;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	u32 __iomem *q_db;
	u16 q_depth;
	s16 cq_vector;
	u16 sq_tail;
	u16 cq_head;
	u16 qid;
	u8 cq_phase;
	u8 cqe_seen;
	u32 *dbbuf_sq_db;
	u32 *dbbuf_cq_db;
	u32 *dbbuf_sq_ei;
	u32 *dbbuf_cq_ei;
};

/*
 * The nvme_iod describes the data in an I/O, including the list of PRP
 * entries.  You can't see it in this data structure because C doesn't let
 * me express that.  Use nvme_init_iod to ensure there's enough space
 * allocated to store the PRP list.
 */
struct nvme_iod {
	struct nvme_request req;
	struct nvme_queue *nvmeq;
	bool use_sgl;
	int aborted;
	int npages;		/* In the PRP list. 0 means small pool in use */
	int nents;		/* Used in scatterlist */
	int length;		/* Of data, in bytes */
	dma_addr_t first_dma;
	struct scatterlist meta_sg; /* metadata requires single contiguous buffer */
	struct scatterlist *sg;
	struct scatterlist inline_sg[0];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
}

static inline unsigned int nvme_dbbuf_size(u32 stride)
{
	return ((num_possible_cpus() + 1) * 8 * stride);
}
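
/*
 * nvme_dbbuf_size() reserves an SQ and a CQ shadow entry (4 bytes each,
 * hence the factor of 8) for the admin queue plus up to one I/O queue per
 * possible CPU, scaled by the controller's doorbell stride.
 */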

static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);

	if (dev->dbbuf_dbs)
		return 0;

	dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_dbs_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_dbs)
		return -ENOMEM;
	dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_eis_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);

	if (dev->dbbuf_dbs) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
	}
	if (dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_eis, dev->dbbuf_eis_dma_addr);
		dev->dbbuf_eis = NULL;
	}
}

static void nvme_dbbuf_init(struct nvme_dev *dev,
			    struct nvme_queue *nvmeq, int qid)
{
	if (!dev->dbbuf_dbs || !qid)
		return;

	nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
}

static void nvme_dbbuf_set(struct nvme_dev *dev)
{
	struct nvme_command c;

	if (!dev->dbbuf_dbs)
		return;

	memset(&c, 0, sizeof(c));
	c.dbbuf.opcode = nvme_admin_dbbuf;
	c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
	c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);

	if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
		dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
		/* Free memory and continue on */
		nvme_dbbuf_dma_free(dev);
	}
}

static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
{
	return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
}
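
/*
 * This is the same wrap-safe "event index" test as virtio's
 * vring_need_event(): an MMIO doorbell write is only needed if the index
 * the controller asked to be notified at (event_idx) was passed while
 * moving from 'old' to 'new_idx'.  Worked example with 16-bit indices:
 *
 *   event_idx = 5, old = 4, new_idx = 7:
 *     (u16)(7 - 5 - 1) = 1 < (u16)(7 - 4) = 3      -> ring the doorbell
 *   event_idx = 9, old = 4, new_idx = 7:
 *     (u16)(7 - 9 - 1) = 65533, not < 3            -> skip the MMIO write
 */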

/* Update dbbuf and return true if an MMIO is required */
static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
					      volatile u32 *dbbuf_ei)
{
	if (dbbuf_db) {
		u16 old_value;

		/*
		 * Ensure that the queue is written before updating
		 * the doorbell in memory
		 */
		wmb();

		old_value = *dbbuf_db;
		*dbbuf_db = value;

		if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
			return false;
	}

	return true;
}

/*
 * Max size of iod being embedded in the request payload
 */
#define NVME_INT_PAGES		2
#define NVME_INT_BYTES(dev)	(NVME_INT_PAGES * (dev)->ctrl.page_size)

/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_npages(unsigned size, struct nvme_dev *dev)
{
	unsigned nprps = DIV_ROUND_UP(size + dev->ctrl.page_size,
				      dev->ctrl.page_size);
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}
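
/*
 * Example: a 64KiB request with a 4KiB controller page size needs at most
 * DIV_ROUND_UP(64KiB + 4KiB, 4KiB) = 17 PRP entries, which fit in a single
 * PRP list page (each page holds PAGE_SIZE / 8 - 1 = 511 entries plus the
 * chain pointer to the next list page).
 */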

/*
 * Calculates the number of pages needed for the SGL segments. For example a 4k
 * page can accommodate 256 SGL descriptors.
 */
static int nvme_pci_npages_sgl(unsigned int num_seg)
{
	return DIV_ROUND_UP(num_seg * sizeof(struct nvme_sgl_desc), PAGE_SIZE);
}

static unsigned int nvme_pci_iod_alloc_size(struct nvme_dev *dev,
		unsigned int size, unsigned int nseg, bool use_sgl)
{
	size_t alloc_size;

	if (use_sgl)
		alloc_size = sizeof(__le64 *) * nvme_pci_npages_sgl(nseg);
	else
		alloc_size = sizeof(__le64 *) * nvme_npages(size, dev);

	return alloc_size + sizeof(struct scatterlist) * nseg;
}

static unsigned int nvme_pci_cmd_size(struct nvme_dev *dev, bool use_sgl)
{
	unsigned int alloc_size = nvme_pci_iod_alloc_size(dev,
				    NVME_INT_BYTES(dev), NVME_INT_PAGES,
				    use_sgl);

	return sizeof(struct nvme_iod) + alloc_size;
}

static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = &dev->queues[0];

	WARN_ON(hctx_idx != 0);
	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
	WARN_ON(nvmeq->tags);

	hctx->driver_data = nvmeq;
	nvmeq->tags = &dev->admin_tagset.tags[0];
	return 0;
}

static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	nvmeq->tags = NULL;
}

static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];

	if (!nvmeq->tags)
		nvmeq->tags = &dev->tagset.tags[hctx_idx];

	WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
	hctx->driver_data = nvmeq;
	return 0;
}

static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_dev *dev = set->driver_data;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
	struct nvme_queue *nvmeq = &dev->queues[queue_idx];

	BUG_ON(!nvmeq);
	iod->nvmeq = nvmeq;
	return 0;
}

static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_dev *dev = set->driver_data;

	return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev));
}

/**
 * __nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
						struct nvme_command *cmd)
{
	u16 tail = nvmeq->sq_tail;

	if (nvmeq->sq_cmds_io)
		memcpy_toio(&nvmeq->sq_cmds_io[tail], cmd, sizeof(*cmd));
	else
		memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));

	if (++tail == nvmeq->q_depth)
		tail = 0;
	if (nvme_dbbuf_update_and_check_event(tail, nvmeq->dbbuf_sq_db,
					      nvmeq->dbbuf_sq_ei))
		writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
}

static void **nvme_pci_iod_list(struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
}

static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	int nseg = blk_rq_nr_phys_segments(req);
	unsigned int avg_seg_size;

	if (nseg == 0)
		return false;

	avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);

	if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
		return false;
	if (!iod->nvmeq->qid)
		return false;
	if (!sgl_threshold || avg_seg_size < sgl_threshold)
		return false;
	return true;
}
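
/*
 * The heuristic above only considers SGLs for I/O queues on controllers
 * that advertise SGL support (admin commands always use PRPs), and only
 * when the average segment size reaches sgl_threshold; PRPs stay cheaper
 * for small, page-aligned transfers.
 */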

static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
	int nseg = blk_rq_nr_phys_segments(rq);
	unsigned int size = blk_rq_payload_bytes(rq);

	iod->use_sgl = nvme_pci_use_sgls(dev, rq);

	if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
		size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg,
				iod->use_sgl);

		iod->sg = kmalloc(alloc_size, GFP_ATOMIC);
		if (!iod->sg)
			return BLK_STS_RESOURCE;
	} else {
		iod->sg = iod->inline_sg;
	}

	iod->aborted = 0;
	iod->npages = -1;
	iod->nents = 0;
	iod->length = size;

	return BLK_STS_OK;
}

static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	const int last_prp = dev->ctrl.page_size / sizeof(__le64) - 1;
	dma_addr_t dma_addr = iod->first_dma, next_dma_addr;

	int i;

	if (iod->npages == 0)
		dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
			dma_addr);

	for (i = 0; i < iod->npages; i++) {
		void *addr = nvme_pci_iod_list(req)[i];

		if (iod->use_sgl) {
			struct nvme_sgl_desc *sg_list = addr;

			next_dma_addr =
			    le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
		} else {
			__le64 *prp_list = addr;

			next_dma_addr = le64_to_cpu(prp_list[last_prp]);
		}

		dma_pool_free(dev->prp_page_pool, addr, dma_addr);
		dma_addr = next_dma_addr;
	}

	if (iod->sg != iod->inline_sg)
		kfree(iod->sg);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
{
	if (be32_to_cpu(pi->ref_tag) == v)
		pi->ref_tag = cpu_to_be32(p);
}

static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
{
	if (be32_to_cpu(pi->ref_tag) == p)
		pi->ref_tag = cpu_to_be32(v);
}

/**
 * nvme_dif_remap - remaps ref tags to bip seed and physical lba
 *
 * The virtual start sector is the one that was originally submitted by the
 * block layer.	Due to partitioning, MD/DM cloning, etc. the actual physical
 * start sector may be different. Remap protection information to match the
 * physical LBA on writes, and back to the original seed on reads.
 *
 * Type 0 and 3 do not have a ref tag, so no remapping required.
 */
static void nvme_dif_remap(struct request *req,
			void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
{
	struct nvme_ns *ns = req->rq_disk->private_data;
	struct bio_integrity_payload *bip;
	struct t10_pi_tuple *pi;
	void *p, *pmap;
	u32 i, nlb, ts, phys, virt;

	if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3)
		return;

	bip = bio_integrity(req->bio);
	if (!bip)
		return;

	pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;

	p = pmap;
	virt = bip_get_seed(bip);
	phys = nvme_block_nr(ns, blk_rq_pos(req));
	nlb = (blk_rq_bytes(req) >> ns->lba_shift);
	ts = ns->disk->queue->integrity.tuple_size;

	for (i = 0; i < nlb; i++, virt++, phys++) {
		pi = (struct t10_pi_tuple *)p;
		dif_swap(phys, virt, pi);
		p += ts;
	}
	kunmap_atomic(pmap);
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
static void nvme_dif_remap(struct request *req,
			void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
{
}
static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
{
}
static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
{
}
#endif

static void nvme_print_sgl(struct scatterlist *sgl, int nents)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		dma_addr_t phys = sg_phys(sg);
		pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
			"dma_address:%pad dma_length:%d\n",
			i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
			sg_dma_len(sg));
	}
}

static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd)
M
Matthew Wilcox 已提交
615
{
C
Christoph Hellwig 已提交
616
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
617
	struct dma_pool *pool;
618
	int length = blk_rq_payload_bytes(req);
619
	struct scatterlist *sg = iod->sg;
M
Matthew Wilcox 已提交
620 621
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
622
	u32 page_size = dev->ctrl.page_size;
623
	int offset = dma_addr & (page_size - 1);
624
	__le64 *prp_list;
C
Chaitanya Kulkarni 已提交
625
	void **list = nvme_pci_iod_list(req);
626
	dma_addr_t prp_dma;
627
	int nprps, i;
M
Matthew Wilcox 已提交
628

629
	length -= (page_size - offset);
630 631
	if (length <= 0) {
		iod->first_dma = 0;
C
Chaitanya Kulkarni 已提交
632
		goto done;
633
	}
M
Matthew Wilcox 已提交
634

635
	dma_len -= (page_size - offset);
M
Matthew Wilcox 已提交
636
	if (dma_len) {
637
		dma_addr += (page_size - offset);
M
Matthew Wilcox 已提交
638 639 640 641 642 643
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

644
	if (length <= page_size) {
645
		iod->first_dma = dma_addr;
C
Chaitanya Kulkarni 已提交
646
		goto done;
647 648
	}

649
	nprps = DIV_ROUND_UP(length, page_size);
650 651
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
652
		iod->npages = 0;
653 654
	} else {
		pool = dev->prp_page_pool;
655
		iod->npages = 1;
656 657
	}

658
	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
659
	if (!prp_list) {
660
		iod->first_dma = dma_addr;
661
		iod->npages = -1;
662
		return BLK_STS_RESOURCE;
663
	}
664 665
	list[0] = prp_list;
	iod->first_dma = prp_dma;
666 667
	i = 0;
	for (;;) {
668
		if (i == page_size >> 3) {
669
			__le64 *old_prp_list = prp_list;
670
			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
671
			if (!prp_list)
672
				return BLK_STS_RESOURCE;
673
			list[iod->npages++] = prp_list;
674 675 676
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
677 678
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
679 680 681
		dma_len -= page_size;
		dma_addr += page_size;
		length -= page_size;
682 683 684 685
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
686 687
		if (unlikely(dma_len < 0))
			goto bad_sgl;
688 689 690
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
M
Matthew Wilcox 已提交
691 692
	}

C
Chaitanya Kulkarni 已提交
693 694 695 696
done:
	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);

697 698 699
	return BLK_STS_OK;

 bad_sgl:
700 701 702
	WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
			"Invalid SGL for payload:%d nents:%d\n",
			blk_rq_payload_bytes(req), iod->nents);
703
	return BLK_STS_IOERR;
M
Matthew Wilcox 已提交
704 705
}

static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge,
		struct scatterlist *sg)
{
	sge->addr = cpu_to_le64(sg_dma_address(sg));
	sge->length = cpu_to_le32(sg_dma_len(sg));
	sge->type = NVME_SGL_FMT_DATA_DESC << 4;
}

static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
		dma_addr_t dma_addr, int entries)
{
	sge->addr = cpu_to_le64(dma_addr);
	if (entries < SGES_PER_PAGE) {
		sge->length = cpu_to_le32(entries * sizeof(*sge));
		sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
	} else {
		sge->length = cpu_to_le32(PAGE_SIZE);
		sge->type = NVME_SGL_FMT_SEG_DESC << 4;
	}
}

static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
728
		struct request *req, struct nvme_rw_command *cmd, int entries)
C
Chaitanya Kulkarni 已提交
729 730 731 732 733 734
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	struct nvme_sgl_desc *sg_list;
	struct scatterlist *sg = iod->sg;
	dma_addr_t sgl_dma;
735
	int i = 0;
C
Chaitanya Kulkarni 已提交
736 737 738 739

	/* setting the transfer type as SGL */
	cmd->flags = NVME_CMD_SGL_METABUF;

740
	if (entries == 1) {
		nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
		return BLK_STS_OK;
	}

	if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
	if (!sg_list) {
		iod->npages = -1;
		return BLK_STS_RESOURCE;
	}

	nvme_pci_iod_list(req)[0] = sg_list;
	iod->first_dma = sgl_dma;

	nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);

	do {
		if (i == SGES_PER_PAGE) {
			struct nvme_sgl_desc *old_sg_desc = sg_list;
			struct nvme_sgl_desc *link = &old_sg_desc[i - 1];

			sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
			if (!sg_list)
				return BLK_STS_RESOURCE;

			i = 0;
			nvme_pci_iod_list(req)[iod->npages++] = sg_list;
			sg_list[i++] = *link;
			nvme_pci_sgl_set_seg(link, sgl_dma, entries);
		}

		nvme_pci_sgl_set_data(&sg_list[i++], sg);
		sg = sg_next(sg);
781
	} while (--entries > 0);
C
Chaitanya Kulkarni 已提交
782 783 784 785

	return BLK_STS_OK;
}

786
static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
787
		struct nvme_command *cmnd)
788
{
C
Christoph Hellwig 已提交
789
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
C
Christoph Hellwig 已提交
790 791 792
	struct request_queue *q = req->q;
	enum dma_data_direction dma_dir = rq_data_dir(req) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;
793
	blk_status_t ret = BLK_STS_IOERR;
794
	int nr_mapped;
795

796
	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
C
Christoph Hellwig 已提交
797 798 799
	iod->nents = blk_rq_map_sg(q, req, iod->sg);
	if (!iod->nents)
		goto out;
800

801
	ret = BLK_STS_RESOURCE;
802 803 804
	nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
			DMA_ATTR_NO_WARN);
	if (!nr_mapped)
C
Christoph Hellwig 已提交
805
		goto out;
806

807
	if (iod->use_sgl)
808
		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
C
Chaitanya Kulkarni 已提交
809 810 811
	else
		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);

812
	if (ret != BLK_STS_OK)
C
Christoph Hellwig 已提交
813
		goto out_unmap;
814

815
	ret = BLK_STS_IOERR;
C
Christoph Hellwig 已提交
816 817 818
	if (blk_integrity_rq(req)) {
		if (blk_rq_count_integrity_sg(q, req->bio) != 1)
			goto out_unmap;
819

820 821
		sg_init_table(&iod->meta_sg, 1);
		if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1)
C
Christoph Hellwig 已提交
822
			goto out_unmap;
823

824
		if (req_op(req) == REQ_OP_WRITE)
C
Christoph Hellwig 已提交
825
			nvme_dif_remap(req, nvme_dif_prep);
826

827
		if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
C
Christoph Hellwig 已提交
828
			goto out_unmap;
829
	}
M
Matthew Wilcox 已提交
830

C
Christoph Hellwig 已提交
831
	if (blk_integrity_rq(req))
832
		cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
833
	return BLK_STS_OK;
M
Matthew Wilcox 已提交
834

C
Christoph Hellwig 已提交
835 836 837 838
out_unmap:
	dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
out:
	return ret;
M
Matthew Wilcox 已提交
839 840
}

C
Christoph Hellwig 已提交
841
static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
M
Matthew Wilcox 已提交
842
{
C
Christoph Hellwig 已提交
843
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
844 845 846 847 848 849
	enum dma_data_direction dma_dir = rq_data_dir(req) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;

	if (iod->nents) {
		dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
		if (blk_integrity_rq(req)) {
850
			if (req_op(req) == REQ_OP_READ)
851
				nvme_dif_remap(req, nvme_dif_complete);
852
			dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
K
Keith Busch 已提交
853
		}
854
	}
K
Keith Busch 已提交
855

856
	nvme_cleanup_cmd(req);
C
Christoph Hellwig 已提交
857
	nvme_free_iod(dev, req);
858
}
M
Matthew Wilcox 已提交
859

860 861 862
/*
 * NOTE: ns is NULL when called on the admin queue.
 */
863
static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
M
Matias Bjørling 已提交
864
			 const struct blk_mq_queue_data *bd)
865
{
M
Matias Bjørling 已提交
866 867
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_queue *nvmeq = hctx->driver_data;
868
	struct nvme_dev *dev = nvmeq->dev;
M
Matias Bjørling 已提交
869
	struct request *req = bd->rq;
C
Christoph Hellwig 已提交
870
	struct nvme_command cmnd;
871
	blk_status_t ret;
K
Keith Busch 已提交
872

873
	ret = nvme_setup_cmd(ns, req, &cmnd);
874
	if (ret)
C
Christoph Hellwig 已提交
875
		return ret;
M
Matias Bjørling 已提交
876

877
	ret = nvme_init_iod(req, dev);
878
	if (ret)
879
		goto out_free_cmd;
M
Matias Bjørling 已提交
880

881
	if (blk_rq_nr_phys_segments(req)) {
882
		ret = nvme_map_data(dev, req, &cmnd);
883 884 885
		if (ret)
			goto out_cleanup_iod;
	}
M
Matias Bjørling 已提交
886

887
	blk_mq_start_request(req);
M
Matias Bjørling 已提交
888

C
Christoph Hellwig 已提交
889
	spin_lock_irq(&nvmeq->q_lock);
890
	if (unlikely(nvmeq->cq_vector < 0)) {
891
		ret = BLK_STS_IOERR;
892
		spin_unlock_irq(&nvmeq->q_lock);
893
		goto out_cleanup_iod;
894
	}
C
Christoph Hellwig 已提交
895
	__nvme_submit_cmd(nvmeq, &cmnd);
M
Matias Bjørling 已提交
896 897
	nvme_process_cq(nvmeq);
	spin_unlock_irq(&nvmeq->q_lock);
898
	return BLK_STS_OK;
899
out_cleanup_iod:
C
Christoph Hellwig 已提交
900
	nvme_free_iod(dev, req);
901 902
out_free_cmd:
	nvme_cleanup_cmd(req);
C
Christoph Hellwig 已提交
903
	return ret;
M
Matthew Wilcox 已提交
904
}
K
Keith Busch 已提交
905

906
static void nvme_pci_complete_rq(struct request *req)
907
{
C
Christoph Hellwig 已提交
908
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
M
Matias Bjørling 已提交
909

910 911
	nvme_unmap_data(iod->nvmeq->dev, req);
	nvme_complete_rq(req);
M
Matthew Wilcox 已提交
912 913
}

914 915 916 917 918 919 920
/* We read the CQE phase first to check if the rest of the entry is valid */
static inline bool nvme_cqe_valid(struct nvme_queue *nvmeq, u16 head,
		u16 phase)
{
	return (le16_to_cpu(nvmeq->cqes[head].status) & 1) == phase;
}
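
/*
 * The phase tag (bit 0 of the CQE status) is inverted by the controller on
 * each pass through the completion ring, so an entry whose phase matches
 * the one the driver currently expects is a new completion; nvmeq->cq_phase
 * is flipped whenever cq_head wraps back to zero.
 */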

921
static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
M
Matthew Wilcox 已提交
922
{
923
	u16 head = nvmeq->cq_head;
924

925 926 927 928 929 930
	if (likely(nvmeq->cq_vector >= 0)) {
		if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
						      nvmeq->dbbuf_cq_ei))
			writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
	}
}
931

932 933 934 935
static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
		struct nvme_completion *cqe)
{
	struct request *req;
936

937 938 939 940 941
	if (unlikely(cqe->command_id >= nvmeq->q_depth)) {
		dev_warn(nvmeq->dev->ctrl.device,
			"invalid id %d completed on queue %d\n",
			cqe->command_id, le16_to_cpu(cqe->sq_id));
		return;
M
Matthew Wilcox 已提交
942 943
	}

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvmeq->qid == 0 &&
K
Keith Busch 已提交
951
			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
952 953
		nvme_complete_async_event(&nvmeq->dev->ctrl,
				cqe->status, &cqe->result);
J
Jens Axboe 已提交
954
		return;
955
	}
M
Matthew Wilcox 已提交
956

957
	nvmeq->cqe_seen = 1;
958 959 960
	req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
	nvme_end_request(req, cqe->status, cqe->result);
}
M
Matthew Wilcox 已提交
961

962 963
static inline bool nvme_read_cqe(struct nvme_queue *nvmeq,
		struct nvme_completion *cqe)
M
Matthew Wilcox 已提交
964
{
965 966
	if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) {
		*cqe = nvmeq->cqes[nvmeq->cq_head];
967

968 969 970
		if (++nvmeq->cq_head == nvmeq->q_depth) {
			nvmeq->cq_head = 0;
			nvmeq->cq_phase = !nvmeq->cq_phase;
M
Matthew Wilcox 已提交
971
		}
972
		return true;
M
Matthew Wilcox 已提交
973
	}
974
	return false;
J
Jens Axboe 已提交
975 976 977 978
}

static void nvme_process_cq(struct nvme_queue *nvmeq)
{
979 980
	struct nvme_completion cqe;
	int consumed = 0;
M
Matthew Wilcox 已提交
981

982 983 984 985
	while (nvme_read_cqe(nvmeq, &cqe)) {
		nvme_handle_cqe(nvmeq, &cqe);
		consumed++;
	}
986

987
	if (consumed)
988
		nvme_ring_cq_doorbell(nvmeq);
M
Matthew Wilcox 已提交
989 990 991
}

static irqreturn_t nvme_irq(int irq, void *data)
992 993 994 995
{
	irqreturn_t result;
	struct nvme_queue *nvmeq = data;
	spin_lock(&nvmeq->q_lock);
996 997 998
	nvme_process_cq(nvmeq);
	result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE;
	nvmeq->cqe_seen = 0;
999 1000 1001 1002 1003 1004 1005
	spin_unlock(&nvmeq->q_lock);
	return result;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
1006 1007 1008
	if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase))
		return IRQ_WAKE_THREAD;
	return IRQ_NONE;
1009 1010
}

K
Keith Busch 已提交
1011
static int __nvme_poll(struct nvme_queue *nvmeq, unsigned int tag)
J
Jens Axboe 已提交
1012
{
1013 1014
	struct nvme_completion cqe;
	int found = 0, consumed = 0;
J
Jens Axboe 已提交
1015

1016 1017
	if (!nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase))
		return 0;
J
Jens Axboe 已提交
1018

	spin_lock_irq(&nvmeq->q_lock);
	while (nvme_read_cqe(nvmeq, &cqe)) {
		nvme_handle_cqe(nvmeq, &cqe);
		consumed++;

		if (tag == cqe.command_id) {
			found = 1;
			break;
		}
       }

	if (consumed)
		nvme_ring_cq_doorbell(nvmeq);
	spin_unlock_irq(&nvmeq->q_lock);

	return found;
J
Jens Axboe 已提交
1035 1036
}

static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	return __nvme_poll(nvmeq, tag);
}

1044
static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
M
Matthew Wilcox 已提交
1045
{
1046
	struct nvme_dev *dev = to_nvme_dev(ctrl);
1047
	struct nvme_queue *nvmeq = &dev->queues[0];
M
Matias Bjørling 已提交
1048
	struct nvme_command c;
M
Matthew Wilcox 已提交
1049

M
Matias Bjørling 已提交
1050 1051
	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_async_event;
1052
	c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
1053

1054
	spin_lock_irq(&nvmeq->q_lock);
1055
	__nvme_submit_cmd(nvmeq, &c);
1056
	spin_unlock_irq(&nvmeq->q_lock);
1057 1058
}

M
Matthew Wilcox 已提交
1059
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
1060
{
M
Matthew Wilcox 已提交
1061 1062 1063 1064 1065 1066
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

1067
	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
M
Matthew Wilcox 已提交
1068 1069 1070 1071 1072 1073 1074 1075
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

1076
	/*
M
Minwoo Im 已提交
1077
	 * Note: we (ab)use the fact that the prp fields survive if no data
1078 1079
	 * is attached to the request.
	 */
M
Matthew Wilcox 已提交
1080 1081 1082 1083 1084 1085 1086 1087
	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

1088
	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
M
Matthew Wilcox 已提交
1089 1090 1091 1092 1093 1094
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	struct nvme_command c;
1095
	int flags = NVME_QUEUE_PHYS_CONTIG;
M
Matthew Wilcox 已提交
1096

1097
	/*
M
Minwoo Im 已提交
1098
	 * Note: we (ab)use the fact that the prp fields survive if no data
1099 1100
	 * is attached to the request.
	 */
M
Matthew Wilcox 已提交
1101 1102 1103 1104 1105 1106 1107 1108
	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

1109
	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

1122
static void abort_endio(struct request *req, blk_status_t error)
1123
{
C
Christoph Hellwig 已提交
1124 1125
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = iod->nvmeq;
1126

1127 1128
	dev_warn(nvmeq->dev->ctrl.device,
		 "Abort status: 0x%x", nvme_req(req)->status);
1129 1130
	atomic_inc(&nvmeq->dev->ctrl.abort_limit);
	blk_mq_free_request(req);
1131 1132
}

static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
{

	/* If true, indicates loss of adapter communication, possibly by a
	 * NVMe Subsystem reset.
	 */
	bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);

1141 1142 1143
	/* If there is a reset/reinit ongoing, we shouldn't reset again. */
	switch (dev->ctrl.state) {
	case NVME_CTRL_RESETTING:
1144
	case NVME_CTRL_CONNECTING:
K
Keith Busch 已提交
1145
		return false;
1146 1147 1148
	default:
		break;
	}

	/* We shouldn't reset unless the controller is on fatal error state
	 * _or_ if we lost the communication with it.
	 */
	if (!(csts & NVME_CSTS_CFS) && !nssro)
		return false;

	/* If PCI error recovery process is happening, we cannot reset or
	 * the recovery mechanism will surely fail.
	 */
	if (pci_channel_offline(to_pci_dev(dev->dev)))
		return false;

	return true;
}

static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
{
	/* Read a config register to help see what died. */
	u16 pci_status;
	int result;

	result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
				      &pci_status);
	if (result == PCIBIOS_SUCCESSFUL)
		dev_warn(dev->ctrl.device,
			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
			 csts, pci_status);
	else
		dev_warn(dev->ctrl.device,
			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
			 csts, result);
}

1183
static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
K
Keith Busch 已提交
1184
{
C
Christoph Hellwig 已提交
1185 1186
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = iod->nvmeq;
K
Keith Busch 已提交
1187
	struct nvme_dev *dev = nvmeq->dev;
M
Matias Bjørling 已提交
1188 1189
	struct request *abort_req;
	struct nvme_command cmd;
K
Keith Busch 已提交
1190 1191 1192 1193 1194 1195 1196 1197
	u32 csts = readl(dev->bar + NVME_REG_CSTS);

	/*
	 * Reset immediately if the controller is failed
	 */
	if (nvme_should_reset(dev, csts)) {
		nvme_warn_reset(dev, csts);
		nvme_dev_disable(dev, false);
1198
		nvme_reset_ctrl(&dev->ctrl);
K
Keith Busch 已提交
1199 1200
		return BLK_EH_HANDLED;
	}
K
Keith Busch 已提交
1201

	/*
	 * Did we miss an interrupt?
	 */
	if (__nvme_poll(nvmeq, req->tag)) {
		dev_warn(dev->ctrl.device,
			 "I/O %d QID %d timeout, completion polled\n",
			 req->tag, nvmeq->qid);
		return BLK_EH_HANDLED;
	}

1212
	/*
1213 1214 1215 1216 1217
	 * Shutdown immediately if controller times out while starting. The
	 * reset work will see the pci device disabled when it gets the forced
	 * cancellation error. All outstanding requests are completed on
	 * shutdown, so we return BLK_EH_HANDLED.
	 */
1218
	if (dev->ctrl.state == NVME_CTRL_RESETTING) {
1219
		dev_warn(dev->ctrl.device,
1220 1221
			 "I/O %d QID %d timeout, disable controller\n",
			 req->tag, nvmeq->qid);
1222
		nvme_dev_disable(dev, false);
1223
		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
1224
		return BLK_EH_HANDLED;
K
Keith Busch 已提交
1225 1226
	}

1227 1228 1229 1230
	/*
 	 * Shutdown the controller immediately and schedule a reset if the
 	 * command was already aborted once before and still hasn't been
 	 * returned to the driver, or if this is the admin queue.
1231
	 */
C
Christoph Hellwig 已提交
1232
	if (!nvmeq->qid || iod->aborted) {
1233
		dev_warn(dev->ctrl.device,
1234 1235
			 "I/O %d QID %d timeout, reset controller\n",
			 req->tag, nvmeq->qid);
1236
		nvme_dev_disable(dev, false);
1237
		nvme_reset_ctrl(&dev->ctrl);
K
Keith Busch 已提交
1238

1239 1240 1241 1242
		/*
		 * Mark the request as handled, since the inline shutdown
		 * forces all outstanding requests to complete.
		 */
1243
		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
1244
		return BLK_EH_HANDLED;
K
Keith Busch 已提交
1245 1246
	}

1247
	if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
1248
		atomic_inc(&dev->ctrl.abort_limit);
1249
		return BLK_EH_RESET_TIMER;
1250
	}
1251
	iod->aborted = 1;
M
Matias Bjørling 已提交
1252

K
Keith Busch 已提交
1253 1254
	memset(&cmd, 0, sizeof(cmd));
	cmd.abort.opcode = nvme_admin_abort_cmd;
M
Matias Bjørling 已提交
1255
	cmd.abort.cid = req->tag;
K
Keith Busch 已提交
1256 1257
	cmd.abort.sqid = cpu_to_le16(nvmeq->qid);

1258 1259 1260
	dev_warn(nvmeq->dev->ctrl.device,
		"I/O %d QID %d timeout, aborting\n",
		 req->tag, nvmeq->qid);
1261 1262

	abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
1263
			BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
1264 1265 1266 1267 1268 1269 1270 1271
	if (IS_ERR(abort_req)) {
		atomic_inc(&dev->ctrl.abort_limit);
		return BLK_EH_RESET_TIMER;
	}

	abort_req->timeout = ADMIN_TIMEOUT;
	abort_req->end_io_data = NULL;
	blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);
K
Keith Busch 已提交
1272

1273 1274 1275 1276 1277 1278
	/*
	 * The aborted req will be completed on receiving the abort req.
	 * We enable the timer again. If hit twice, it'll cause a device reset,
	 * as the device then is in a faulty state.
	 */
	return BLK_EH_RESET_TIMER;
K
Keith Busch 已提交
1279 1280
}

M
Matias Bjørling 已提交
1281 1282
static void nvme_free_queue(struct nvme_queue *nvmeq)
{
1283 1284
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
1285 1286
	if (nvmeq->sq_cmds)
		dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
1287 1288 1289
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
}

1290
static void nvme_free_queues(struct nvme_dev *dev, int lowest)
1291 1292 1293
{
	int i;

1294 1295
	for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
		dev->ctrl.queue_count--;
1296
		nvme_free_queue(&dev->queues[i]);
1297
	}
1298 1299
}

K
Keith Busch 已提交
1300 1301 1302 1303 1304
/**
 * nvme_suspend_queue - put queue into suspended state
 * @nvmeq - queue to suspend
 */
static int nvme_suspend_queue(struct nvme_queue *nvmeq)
M
Matthew Wilcox 已提交
1305
{
K
Keith Busch 已提交
1306
	int vector;
M
Matthew Wilcox 已提交
1307

1308
	spin_lock_irq(&nvmeq->q_lock);
K
Keith Busch 已提交
1309 1310 1311 1312
	if (nvmeq->cq_vector == -1) {
		spin_unlock_irq(&nvmeq->q_lock);
		return 1;
	}
1313
	vector = nvmeq->cq_vector;
K
Keith Busch 已提交
1314
	nvmeq->dev->online_queues--;
K
Keith Busch 已提交
1315
	nvmeq->cq_vector = -1;
1316 1317
	spin_unlock_irq(&nvmeq->q_lock);

1318
	if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
1319
		blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q);
1320

1321
	pci_free_irq(to_pci_dev(nvmeq->dev->dev), vector, nvmeq);
M
Matthew Wilcox 已提交
1322

K
Keith Busch 已提交
1323 1324
	return 0;
}
M
Matthew Wilcox 已提交
1325

1326
static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
K
Keith Busch 已提交
1327
{
1328
	struct nvme_queue *nvmeq = &dev->queues[0];
K
Keith Busch 已提交
1329

1330 1331 1332
	if (shutdown)
		nvme_shutdown_ctrl(&dev->ctrl);
	else
1333
		nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap);
1334 1335 1336 1337

	spin_lock_irq(&nvmeq->q_lock);
	nvme_process_cq(nvmeq);
	spin_unlock_irq(&nvmeq->q_lock);
M
Matthew Wilcox 已提交
1338 1339
}

1340 1341 1342 1343
static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
				int entry_size)
{
	int q_depth = dev->q_depth;
1344 1345
	unsigned q_size_aligned = roundup(q_depth * entry_size,
					  dev->ctrl.page_size);
1346 1347

	if (q_size_aligned * nr_io_queues > dev->cmb_size) {
1348
		u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
1349
		mem_per_q = round_down(mem_per_q, dev->ctrl.page_size);
1350
		q_depth = div_u64(mem_per_q, entry_size);

		/*
		 * Ensure the reduced q_depth is above some threshold where it
		 * would be better to map queues in system memory with the
		 * original depth
		 */
		if (q_depth < 64)
			return -ENOMEM;
	}

	return q_depth;
}

static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
				int qid, int depth)
{
1367
	if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
1368 1369
		unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
						      dev->ctrl.page_size);
1370
		nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
		nvmeq->sq_cmds_io = dev->cmb + offset;
	} else {
		nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
					&nvmeq->sq_dma_addr, GFP_KERNEL);
		if (!nvmeq->sq_cmds)
			return -ENOMEM;
	}

	return 0;
}
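
/*
 * When the controller exposes a Controller Memory Buffer and use_cmb_sqes
 * allows it, submission queue entries live in device memory and are written
 * with memcpy_toio() in __nvme_submit_cmd(); otherwise they come from
 * ordinary coherent host memory.
 */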

1382 1383
static int nvme_alloc_queue(struct nvme_dev *dev, int qid,
		int depth, int node)
M
Matthew Wilcox 已提交
1384
{
1385
	struct nvme_queue *nvmeq = &dev->queues[qid];
M
Matthew Wilcox 已提交
1386

1387 1388
	if (dev->ctrl.queue_count > qid)
		return 0;
M
Matthew Wilcox 已提交
1389

1390
	nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
J
Joe Perches 已提交
1391
					  &nvmeq->cq_dma_addr, GFP_KERNEL);
M
Matthew Wilcox 已提交
1392 1393 1394
	if (!nvmeq->cqes)
		goto free_nvmeq;

1395
	if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth))
M
Matthew Wilcox 已提交
1396 1397
		goto free_cqdma;

1398
	nvmeq->q_dmadev = dev->dev;
M
Matthew Wilcox 已提交
1399
	nvmeq->dev = dev;
M
Matthew Wilcox 已提交
1400 1401
	spin_lock_init(&nvmeq->q_lock);
	nvmeq->cq_head = 0;
M
Matthew Wilcox 已提交
1402
	nvmeq->cq_phase = 1;
1403
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
M
Matthew Wilcox 已提交
1404
	nvmeq->q_depth = depth;
K
Keith Busch 已提交
1405
	nvmeq->qid = qid;
1406
	nvmeq->cq_vector = -1;
1407
	dev->ctrl.queue_count++;
1408

1409
	return 0;
M
Matthew Wilcox 已提交
1410 1411

 free_cqdma:
1412
	dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
M
Matthew Wilcox 已提交
1413 1414
							nvmeq->cq_dma_addr);
 free_nvmeq:
1415
	return -ENOMEM;
M
Matthew Wilcox 已提交
1416 1417
}

1418
static int queue_request_irq(struct nvme_queue *nvmeq)
1419
{
	struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
	int nr = nvmeq->dev->ctrl.instance;

	if (use_threaded_interrupts) {
		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check,
				nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
	} else {
		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq,
				NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
	}
1430 1431
}

1432
static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
M
Matthew Wilcox 已提交
1433
{
1434
	struct nvme_dev *dev = nvmeq->dev;
M
Matthew Wilcox 已提交
1435

1436
	spin_lock_irq(&nvmeq->q_lock);
1437 1438 1439
	nvmeq->sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
1440
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1441
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
1442
	nvme_dbbuf_init(dev, nvmeq, qid);
K
Keith Busch 已提交
1443
	dev->online_queues++;
1444
	spin_unlock_irq(&nvmeq->q_lock);
}

static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
{
	struct nvme_dev *dev = nvmeq->dev;
	int result;
1451

K
Keith Busch 已提交
1452
	nvmeq->cq_vector = qid - 1;
M
Matthew Wilcox 已提交
1453 1454
	result = adapter_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
1455
		return result;
M
Matthew Wilcox 已提交
1456 1457 1458 1459 1460

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

1461
	nvme_init_queue(nvmeq, qid);
1462
	result = queue_request_irq(nvmeq);
M
Matthew Wilcox 已提交
1463 1464 1465
	if (result < 0)
		goto release_sq;

1466
	return result;
M
Matthew Wilcox 已提交
1467 1468 1469 1470 1471

 release_sq:
	adapter_delete_sq(dev, qid);
 release_cq:
	adapter_delete_cq(dev, qid);
1472
	return result;
M
Matthew Wilcox 已提交
1473 1474
}

1475
static const struct blk_mq_ops nvme_mq_admin_ops = {
1476
	.queue_rq	= nvme_queue_rq,
1477
	.complete	= nvme_pci_complete_rq,
M
Matias Bjørling 已提交
1478
	.init_hctx	= nvme_admin_init_hctx,
1479
	.exit_hctx      = nvme_admin_exit_hctx,
1480
	.init_request	= nvme_init_request,
M
Matias Bjørling 已提交
1481 1482 1483
	.timeout	= nvme_timeout,
};

1484
static const struct blk_mq_ops nvme_mq_ops = {
M
Matias Bjørling 已提交
1485
	.queue_rq	= nvme_queue_rq,
1486
	.complete	= nvme_pci_complete_rq,
M
Matias Bjørling 已提交
1487 1488
	.init_hctx	= nvme_init_hctx,
	.init_request	= nvme_init_request,
1489
	.map_queues	= nvme_pci_map_queues,
M
Matias Bjørling 已提交
1490
	.timeout	= nvme_timeout,
J
Jens Axboe 已提交
1491
	.poll		= nvme_poll,
M
Matias Bjørling 已提交
1492 1493
};

1494 1495
static void nvme_dev_remove_admin(struct nvme_dev *dev)
{
1496
	if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
		/*
		 * If the controller was reset during removal, it's possible
		 * user requests may be waiting on a stopped queue. Start the
		 * queue to flush these to completion.
		 */
1502
		blk_mq_unquiesce_queue(dev->ctrl.admin_q);
1503
		blk_cleanup_queue(dev->ctrl.admin_q);
1504 1505 1506 1507
		blk_mq_free_tag_set(&dev->admin_tagset);
	}
}

M
Matias Bjørling 已提交
1508 1509
static int nvme_alloc_admin_tags(struct nvme_dev *dev)
{
1510
	if (!dev->ctrl.admin_q) {
M
Matias Bjørling 已提交
1511 1512
		dev->admin_tagset.ops = &nvme_mq_admin_ops;
		dev->admin_tagset.nr_hw_queues = 1;
K
Keith Busch 已提交
1513

K
Keith Busch 已提交
1514
		dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
M
Matias Bjørling 已提交
1515
		dev->admin_tagset.timeout = ADMIN_TIMEOUT;
1516
		dev->admin_tagset.numa_node = dev_to_node(dev->dev);
C
Chaitanya Kulkarni 已提交
1517
		dev->admin_tagset.cmd_size = nvme_pci_cmd_size(dev, false);
1518
		dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
M
Matias Bjørling 已提交
1519 1520 1521 1522
		dev->admin_tagset.driver_data = dev;

		if (blk_mq_alloc_tag_set(&dev->admin_tagset))
			return -ENOMEM;
1523
		dev->ctrl.admin_tagset = &dev->admin_tagset;
M
Matias Bjørling 已提交
1524

1525 1526
		dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
		if (IS_ERR(dev->ctrl.admin_q)) {
M
Matias Bjørling 已提交
1527 1528 1529
			blk_mq_free_tag_set(&dev->admin_tagset);
			return -ENOMEM;
		}
1530
		if (!blk_get_queue(dev->ctrl.admin_q)) {
1531
			nvme_dev_remove_admin(dev);
1532
			dev->ctrl.admin_q = NULL;
1533 1534
			return -ENODEV;
		}
K
Keith Busch 已提交
1535
	} else
1536
		blk_mq_unquiesce_queue(dev->ctrl.admin_q);
M
Matias Bjørling 已提交
1537 1538 1539 1540

	return 0;
}

static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
{
	return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride);
}
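
/*
 * Each queue exposes a 4-byte SQ tail doorbell and a 4-byte CQ head
 * doorbell (8 bytes per queue) for the admin queue plus nr_io_queues I/O
 * queues, spaced by the controller's doorbell stride; this is how much of
 * BAR 0 must be mapped to reach the last doorbell.
 */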

static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (size <= dev->bar_mapped_size)
		return 0;
	if (size > pci_resource_len(pdev, 0))
		return -ENOMEM;
	if (dev->bar)
		iounmap(dev->bar);
	dev->bar = ioremap(pci_resource_start(pdev, 0), size);
	if (!dev->bar) {
		dev->bar_mapped_size = 0;
		return -ENOMEM;
	}
	dev->bar_mapped_size = size;
	dev->dbs = dev->bar + NVME_REG_DBS;

	return 0;
}

1567
static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
M
Matthew Wilcox 已提交
1568
{
1569
	int result;
M
Matthew Wilcox 已提交
1570 1571 1572
	u32 aqa;
	struct nvme_queue *nvmeq;

1573 1574 1575 1576
	result = nvme_remap_bar(dev, db_bar_size(dev, 0));
	if (result < 0)
		return result;

1577
	dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
1578
				NVME_CAP_NSSRC(dev->ctrl.cap) : 0;
1579

1580 1581 1582
	if (dev->subsystem &&
	    (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO))
		writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS);
1583

1584
	result = nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap);
1585 1586
	if (result < 0)
		return result;
M
Matthew Wilcox 已提交
1587

1588 1589 1590 1591
	result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
			dev_to_node(dev->dev));
	if (result)
		return result;
M
Matthew Wilcox 已提交
1592

1593
	nvmeq = &dev->queues[0];
M
Matthew Wilcox 已提交
1594 1595 1596
	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

1597 1598 1599
	writel(aqa, dev->bar + NVME_REG_AQA);
	lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
	lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);
M
Matthew Wilcox 已提交
1600

1601
	result = nvme_enable_ctrl(&dev->ctrl, dev->ctrl.cap);
1602
	if (result)
K
Keith Busch 已提交
1603
		return result;
M
Matias Bjørling 已提交
1604

K
Keith Busch 已提交
1605
	nvmeq->cq_vector = 0;
1606
	nvme_init_queue(nvmeq, 0);
1607
	result = queue_request_irq(nvmeq);
1608 1609
	if (result) {
		nvmeq->cq_vector = -1;
K
Keith Busch 已提交
1610
		return result;
1611
	}
1612

M
Matthew Wilcox 已提交
1613 1614 1615
	return result;
}

1616
static int nvme_create_io_queues(struct nvme_dev *dev)
K
Keith Busch 已提交
1617
{
1618
	unsigned i, max;
1619
	int ret = 0;
K
Keith Busch 已提交
1620

1621
	for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
1622
		/* vector == qid - 1, match nvme_create_queue */
1623
		if (nvme_alloc_queue(dev, i, dev->q_depth,
1624
		     pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) {
1625
			ret = -ENOMEM;
K
Keith Busch 已提交
1626
			break;
1627 1628
		}
	}
K
Keith Busch 已提交
1629

1630
	max = min(dev->max_qid, dev->ctrl.queue_count - 1);
1631
	for (i = dev->online_queues; i <= max; i++) {
1632
		ret = nvme_create_queue(&dev->queues[i], i);
K
Keith Busch 已提交
1633
		if (ret)
K
Keith Busch 已提交
1634
			break;
M
Matthew Wilcox 已提交
1635
	}
1636 1637 1638

	/*
	 * Ignore failing Create SQ/CQ commands, we can continue with less
1639 1640
	 * than the desired amount of queues, and even a controller without
	 * I/O queues can still be used to issue admin commands.  This might
1641 1642 1643
	 * be useful to upgrade a buggy firmware for example.
	 */
	return ret >= 0 ? 0 : ret;
M
Matthew Wilcox 已提交
1644 1645
}

static ssize_t nvme_cmb_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));

1652
	return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz  : x%08x\n",
1653 1654 1655 1656
		       ndev->cmbloc, ndev->cmbsz);
}
static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);

1657
static u64 nvme_cmb_size_unit(struct nvme_dev *dev)
1658
{
	u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;

	return 1ULL << (12 + 4 * szu);
}

static u32 nvme_cmb_size(struct nvme_dev *dev)
{
	return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK;
}

1669
static void nvme_map_cmb(struct nvme_dev *dev)
1670
{
1671
	u64 size, offset;
1672 1673
	resource_size_t bar_size;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
1674
	int bar;
1675

1676
	dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
1677 1678
	if (!dev->cmbsz)
		return;
1679
	dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
1680

1681
	if (!use_cmb_sqes)
1682
		return;
1683

1684 1685
	size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev);
	offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc);
1686 1687
	bar = NVME_CMB_BIR(dev->cmbloc);
	bar_size = pci_resource_len(pdev, bar);
1688 1689

	if (offset > bar_size)
1690
		return;

	/*
	 * Controllers may support a CMB size larger than their BAR,
	 * for example, due to being behind a bridge. Reduce the CMB to
	 * the reported size of the BAR
	 */
	if (size > bar_size - offset)
		size = bar_size - offset;

1700 1701 1702
	dev->cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
	if (!dev->cmb)
		return;
1703
	dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset;
1704
	dev->cmb_size = size;
1705 1706 1707 1708 1709

	if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
				    &dev_attr_cmb.attr, NULL))
		dev_warn(dev->ctrl.device,
			 "failed to add sysfs attribute for CMB\n");
1710 1711 1712 1713 1714 1715 1716
}

static inline void nvme_release_cmb(struct nvme_dev *dev)
{
	if (dev->cmb) {
		iounmap(dev->cmb);
		dev->cmb = NULL;
		sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
					     &dev_attr_cmb.attr, NULL);
		dev->cmbsz = 0;
	}
}

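/*
 * Pass the host memory buffer descriptor list to the controller with a Set
 * Features (Host Memory Buffer) command; "bits" selects the enable/return
 * behaviour.
 */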
static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
{
	u64 dma_addr = dev->host_mem_descs_dma;
	struct nvme_command c;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode	= nvme_admin_set_features;
	c.features.fid		= cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
	c.features.dword11	= cpu_to_le32(bits);
	c.features.dword12	= cpu_to_le32(dev->host_mem_size >>
					      ilog2(dev->ctrl.page_size));
	c.features.dword13	= cpu_to_le32(lower_32_bits(dma_addr));
	c.features.dword14	= cpu_to_le32(upper_32_bits(dma_addr));
	c.features.dword15	= cpu_to_le32(dev->nr_host_mem_descs);

	ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
	if (ret) {
		dev_warn(dev->ctrl.device,
			 "failed to set host mem (err %d, flags %#x).\n",
			 ret, bits);
	}
	return ret;
}

static void nvme_free_host_mem(struct nvme_dev *dev)
{
	int i;

	for (i = 0; i < dev->nr_host_mem_descs; i++) {
		struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
		size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;

		dma_free_coherent(dev->dev, size, dev->host_mem_desc_bufs[i],
				le64_to_cpu(desc->addr));
	}

	kfree(dev->host_mem_desc_bufs);
	dev->host_mem_desc_bufs = NULL;
	dma_free_coherent(dev->dev,
			dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
			dev->host_mem_descs, dev->host_mem_descs_dma);
	dev->host_mem_descs = NULL;
	dev->nr_host_mem_descs = 0;
}

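/*
 * Build the host memory buffer out of DMA chunks of at most chunk_size
 * bytes, filling in the descriptor list as we go; stop once the preferred
 * size is reached, the descriptor limit is hit or an allocation fails.
 */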
static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
		u32 chunk_size)
{
	struct nvme_host_mem_buf_desc *descs;
	u32 max_entries, len;
	dma_addr_t descs_dma;
	int i = 0;
	void **bufs;
	u64 size, tmp;

	tmp = (preferred + chunk_size - 1);
	do_div(tmp, chunk_size);
	max_entries = tmp;

	if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
		max_entries = dev->ctrl.hmmaxd;

	descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs),
			&descs_dma, GFP_KERNEL);
	if (!descs)
		goto out;

	bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL);
	if (!bufs)
		goto out_free_descs;

	for (size = 0; size < preferred && i < max_entries; size += len) {
		dma_addr_t dma_addr;

		len = min_t(u64, chunk_size, preferred - size);
		bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL,
				DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
		if (!bufs[i])
			break;

		descs[i].addr = cpu_to_le64(dma_addr);
		descs[i].size = cpu_to_le32(len / dev->ctrl.page_size);
		i++;
	}

	if (!size)
		goto out_free_bufs;

	dev->nr_host_mem_descs = i;
	dev->host_mem_size = size;
	dev->host_mem_descs = descs;
	dev->host_mem_descs_dma = descs_dma;
	dev->host_mem_desc_bufs = bufs;
	return 0;

out_free_bufs:
	while (--i >= 0) {
		size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;

		dma_free_coherent(dev->dev, size, bufs[i],
				le64_to_cpu(descs[i].addr));
	}

	kfree(bufs);
out_free_descs:
	dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
			descs_dma);
out:
	dev->host_mem_descs = NULL;
	return -ENOMEM;
}

static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
{
	u32 chunk_size;

	/* start big and work our way down */
	for (chunk_size = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
	     chunk_size >= max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
	     chunk_size /= 2) {
		if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) {
			if (!min || dev->host_mem_size >= min)
				return 0;
			nvme_free_host_mem(dev);
		}
	}

	return -ENOMEM;
}

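/*
 * Size, allocate and enable the host memory buffer based on the controller's
 * HMPRE/HMMIN hints and the max_host_mem_size_mb module parameter.  Failing
 * to provide an HMB is not fatal; the controller must still work without it.
 */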
static int nvme_setup_host_mem(struct nvme_dev *dev)
{
	u64 max = (u64)max_host_mem_size_mb * SZ_1M;
	u64 preferred = (u64)dev->ctrl.hmpre * 4096;
	u64 min = (u64)dev->ctrl.hmmin * 4096;
	u32 enable_bits = NVME_HOST_MEM_ENABLE;
	int ret;

	preferred = min(preferred, max);
	if (min > max) {
		dev_warn(dev->ctrl.device,
			"min host memory (%lld MiB) above limit (%d MiB).\n",
			min >> ilog2(SZ_1M), max_host_mem_size_mb);
		nvme_free_host_mem(dev);
		return 0;
	}

	/*
	 * If we already have a buffer allocated, check if we can reuse it.
	 */
	if (dev->host_mem_descs) {
		if (dev->host_mem_size >= min)
			enable_bits |= NVME_HOST_MEM_RETURN;
		else
			nvme_free_host_mem(dev);
	}

	if (!dev->host_mem_descs) {
		if (nvme_alloc_host_mem(dev, min, preferred)) {
			dev_warn(dev->ctrl.device,
				"failed to allocate host memory buffer.\n");
			return 0; /* controller must work without HMB */
		}

		dev_info(dev->ctrl.device,
			"allocated %lld MiB host memory buffer.\n",
			dev->host_mem_size >> ilog2(SZ_1M));
	}

	ret = nvme_set_host_mem(dev, enable_bits);
	if (ret)
		nvme_free_host_mem(dev);
	return ret;
}

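/*
 * Discover how many I/O queues the controller supports, resize the doorbell
 * BAR mapping accordingly, allocate one interrupt vector per queue and
 * finally create the queues themselves.
 */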
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
	struct nvme_queue *adminq = &dev->queues[0];
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	int result, nr_io_queues;
	unsigned long size;

	nr_io_queues = num_present_cpus();
	result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
	if (result < 0)
		return result;

	if (nr_io_queues == 0)
		return 0;

	if (dev->cmb && (dev->cmbsz & NVME_CMBSZ_SQS)) {
		result = nvme_cmb_qdepth(dev, nr_io_queues,
				sizeof(struct nvme_command));
		if (result > 0)
			dev->q_depth = result;
		else
			nvme_release_cmb(dev);
	}

	do {
		size = db_bar_size(dev, nr_io_queues);
		result = nvme_remap_bar(dev, size);
		if (!result)
			break;
		if (!--nr_io_queues)
			return -ENOMEM;
	} while (1);
	adminq->q_db = dev->dbs;

	/* Deregister the admin queue's interrupt */
	pci_free_irq(pdev, 0, adminq);

	/*
	 * If we enabled MSI-X early because the device has no INTx support,
	 * disable it again before setting up the full range we need.
	 */
	pci_free_irq_vectors(pdev);
	nr_io_queues = pci_alloc_irq_vectors(pdev, 1, nr_io_queues,
			PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY);
	if (nr_io_queues <= 0)
		return -EIO;
	dev->max_qid = nr_io_queues;

	/*
	 * Should investigate if there's a performance win from allocating
	 * more queues than interrupt vectors; it might allow the submission
	 * path to scale better, even if the receive path is limited by the
	 * number of interrupts.
	 */

	result = queue_request_irq(adminq);
	if (result) {
		adminq->cq_vector = -1;
		return result;
	}
	return nvme_create_io_queues(dev);
}

static void nvme_del_queue_end(struct request *req, blk_status_t error)
{
	struct nvme_queue *nvmeq = req->end_io_data;

	blk_mq_free_request(req);
	complete(&nvmeq->dev->ioq_wait);
}

static void nvme_del_cq_end(struct request *req, blk_status_t error)
{
	struct nvme_queue *nvmeq = req->end_io_data;

	if (!error) {
		unsigned long flags;

		/*
		 * We might be called with the AQ q_lock held
		 * and the I/O queue q_lock should always
		 * nest inside the AQ one.
		 */
		spin_lock_irqsave_nested(&nvmeq->q_lock, flags,
					SINGLE_DEPTH_NESTING);
		nvme_process_cq(nvmeq);
		spin_unlock_irqrestore(&nvmeq->q_lock, flags);
	}

	nvme_del_queue_end(req, error);
}

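/*
 * Submit an asynchronous Delete SQ/CQ admin command for the given queue; the
 * completion callback signals dev->ioq_wait so nvme_disable_io_queues() can
 * wait for the deletions to finish.
 */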
static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
{
	struct request_queue *q = nvmeq->dev->ctrl.admin_q;
	struct request *req;
	struct nvme_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.delete_queue.opcode = opcode;
	cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);

	req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = ADMIN_TIMEOUT;
	req->end_io_data = nvmeq;

	blk_execute_rq_nowait(q, NULL, req, false,
			opcode == nvme_admin_delete_cq ?
				nvme_del_cq_end : nvme_del_queue_end);
	return 0;
}

static void nvme_disable_io_queues(struct nvme_dev *dev)
{
	int pass, queues = dev->online_queues - 1;
	unsigned long timeout;
	u8 opcode = nvme_admin_delete_sq;

	for (pass = 0; pass < 2; pass++) {
		int sent = 0, i = queues;

		reinit_completion(&dev->ioq_wait);
 retry:
		timeout = ADMIN_TIMEOUT;
		for (; i > 0; i--, sent++)
			if (nvme_delete_queue(&dev->queues[i], opcode))
				break;

		while (sent--) {
			timeout = wait_for_completion_io_timeout(&dev->ioq_wait, timeout);
			if (timeout == 0)
				return;
			if (i)
				goto retry;
		}
		opcode = nvme_admin_delete_cq;
	}
}

/*
 * Return an error only when the I/O tagset allocation failed.
 */
static int nvme_dev_add(struct nvme_dev *dev)
{
	int ret;

	if (!dev->ctrl.tagset) {
		dev->tagset.ops = &nvme_mq_ops;
		dev->tagset.nr_hw_queues = dev->online_queues - 1;
		dev->tagset.timeout = NVME_IO_TIMEOUT;
		dev->tagset.numa_node = dev_to_node(dev->dev);
		dev->tagset.queue_depth =
				min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
		dev->tagset.cmd_size = nvme_pci_cmd_size(dev, false);
		if ((dev->ctrl.sgls & ((1 << 0) | (1 << 1))) && sgl_threshold) {
			dev->tagset.cmd_size = max(dev->tagset.cmd_size,
					nvme_pci_cmd_size(dev, true));
		}
		dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
		dev->tagset.driver_data = dev;

		ret = blk_mq_alloc_tag_set(&dev->tagset);
		if (ret) {
			dev_warn(dev->ctrl.device,
				"IO queues tagset allocation failed %d\n", ret);
			return ret;
		}
		dev->ctrl.tagset = &dev->tagset;

		nvme_dbbuf_set(dev);
	} else {
		blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);

		/* Free previously allocated queues that are no longer usable */
		nvme_free_queues(dev, dev->online_queues);
	}

	return 0;
}

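/*
 * Bring up the PCI side of the device: enable memory access and bus
 * mastering, set the DMA mask, pre-allocate a single interrupt vector, read
 * CAP to size the queues, apply per-device quirks and map the CMB.
 */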
static int nvme_pci_enable(struct nvme_dev *dev)
{
	int result = -ENOMEM;
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pci_enable_device_mem(pdev))
		return result;

	pci_set_master(pdev);

	if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32)))
		goto disable;

	if (readl(dev->bar + NVME_REG_CSTS) == -1) {
		result = -ENODEV;
		goto disable;
	}

	/*
	 * Some devices and/or platforms don't advertise or work with INTx
	 * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
	 * adjust this later.
	 */
	result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (result < 0)
		return result;

	dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);

	dev->q_depth = min_t(int, NVME_CAP_MQES(dev->ctrl.cap) + 1,
				io_queue_depth);
	dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
	dev->dbs = dev->bar + 4096;

	/*
	 * Temporary fix for the Apple controller found in the MacBook8,1 and
	 * some MacBook7,1 to avoid controller resets and data loss.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
		dev->q_depth = 2;
		dev_warn(dev->ctrl.device, "detected Apple NVMe controller, "
			"set queue depth=%u to work around controller resets\n",
			dev->q_depth);
	} else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG &&
		   (pdev->device == 0xa821 || pdev->device == 0xa822) &&
		   NVME_CAP_MQES(dev->ctrl.cap) == 0) {
		dev->q_depth = 64;
		dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, "
			"set queue depth=%u\n", dev->q_depth);
	}

	nvme_map_cmb(dev);

	pci_enable_pcie_error_reporting(pdev);
	pci_save_state(pdev);
	return 0;

 disable:
	pci_disable_device(pdev);
	return result;
}

static void nvme_dev_unmap(struct nvme_dev *dev)
{
	if (dev->bar)
		iounmap(dev->bar);
	pci_release_mem_regions(to_pci_dev(dev->dev));
}

static void nvme_pci_disable(struct nvme_dev *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	nvme_release_cmb(dev);
	pci_free_irq_vectors(pdev);

	if (pci_is_enabled(pdev)) {
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
	}
}

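/*
 * Common teardown path for reset, shutdown and error handling: freeze and
 * stop the queues, tell a still-responsive controller to stop using the host
 * memory buffer and delete its queues, then disable the PCI device and fail
 * any outstanding requests.
 */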
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
{
	int i;
	bool dead = true;
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	mutex_lock(&dev->shutdown_lock);
	if (pci_is_enabled(pdev)) {
		u32 csts = readl(dev->bar + NVME_REG_CSTS);

		if (dev->ctrl.state == NVME_CTRL_LIVE ||
		    dev->ctrl.state == NVME_CTRL_RESETTING)
			nvme_start_freeze(&dev->ctrl);
		dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) ||
			pdev->error_state != pci_channel_io_normal);
	}

	/*
	 * Give the controller a chance to complete all entered requests if
	 * doing a safe shutdown.
	 */
	if (!dead) {
		if (shutdown)
			nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);

		/*
		 * If the controller is still alive, tell it to stop using the
		 * host memory buffer.  In theory the shutdown / reset should
		 * make sure that it doesn't access the host memory anymore,
		 * but I'd rather be safe than sorry.
		 */
		if (dev->host_mem_descs)
			nvme_set_host_mem(dev, 0);

	}
	nvme_stop_queues(&dev->ctrl);

	if (!dead) {
		nvme_disable_io_queues(dev);
		nvme_disable_admin_queue(dev, shutdown);
	}
	for (i = dev->ctrl.queue_count - 1; i >= 0; i--)
		nvme_suspend_queue(&dev->queues[i]);

	nvme_pci_disable(dev);

	blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
	blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);

	/*
	 * The driver will not be starting up queues again if shutting down so
	 * must flush all entered requests to their failed completion to avoid
	 * deadlocking blk-mq hot-cpu notifier.
	 */
	if (shutdown)
		nvme_start_queues(&dev->ctrl);
	mutex_unlock(&dev->shutdown_lock);
}

static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
	dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
						PAGE_SIZE, PAGE_SIZE, 0);
	if (!dev->prp_page_pool)
		return -ENOMEM;

	/* Optimisation for I/Os between 4k and 128k */
	dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
						256, 256, 0);
	if (!dev->prp_small_pool) {
		dma_pool_destroy(dev->prp_page_pool);
		return -ENOMEM;
	}
	return 0;
}

static void nvme_release_prp_pools(struct nvme_dev *dev)
{
	dma_pool_destroy(dev->prp_page_pool);
	dma_pool_destroy(dev->prp_small_pool);
}

static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
{
	struct nvme_dev *dev = to_nvme_dev(ctrl);

	nvme_dbbuf_dma_free(dev);
	put_device(dev->dev);
	if (dev->tagset.tags)
		blk_mq_free_tag_set(&dev->tagset);
	if (dev->ctrl.admin_q)
		blk_put_queue(dev->ctrl.admin_q);
	kfree(dev->queues);
	free_opal_dev(dev->ctrl.opal_dev);
	kfree(dev);
}

static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
{
	dev_warn(dev->ctrl.device, "Removing after probe failure status: %d\n", status);

	nvme_get_ctrl(&dev->ctrl);
	nvme_dev_disable(dev, false);
	if (!queue_work(nvme_wq, &dev->remove_work))
		nvme_put_ctrl(&dev->ctrl);
}

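/*
 * Controller (re)initialization path, run from the reset work item: disable
 * the device if it is live, then bring up the admin queue, identify the
 * controller, set up the optional Opal/dbbuf/HMB features and finally the
 * I/O queues.  On failure the dead-controller removal work is queued.
 */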
static void nvme_reset_work(struct work_struct *work)
{
	struct nvme_dev *dev =
		container_of(work, struct nvme_dev, ctrl.reset_work);
	bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
	int result = -ENODEV;
	enum nvme_ctrl_state new_state = NVME_CTRL_LIVE;

	if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
		goto out;

	/*
	 * If we're called to reset a live controller first shut it down before
	 * moving on.
	 */
	if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
		nvme_dev_disable(dev, false);

	/*
	 * Use the CONNECTING state, introduced by the nvme-fc/rdma transports,
	 * to mark the initializing procedure here.
	 */
	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
		dev_warn(dev->ctrl.device,
			"failed to mark controller CONNECTING\n");
		goto out;
	}

	result = nvme_pci_enable(dev);
	if (result)
		goto out;

	result = nvme_pci_configure_admin_queue(dev);
	if (result)
		goto out;

	result = nvme_alloc_admin_tags(dev);
	if (result)
		goto out;

	result = nvme_init_identify(&dev->ctrl);
	if (result)
		goto out;

	if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) {
		if (!dev->ctrl.opal_dev)
			dev->ctrl.opal_dev =
				init_opal_dev(&dev->ctrl, &nvme_sec_submit);
		else if (was_suspend)
			opal_unlock_from_suspend(dev->ctrl.opal_dev);
	} else {
		free_opal_dev(dev->ctrl.opal_dev);
		dev->ctrl.opal_dev = NULL;
	}

	if (dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP) {
		result = nvme_dbbuf_dma_alloc(dev);
		if (result)
			dev_warn(dev->dev,
				 "unable to allocate dma for dbbuf\n");
	}

	if (dev->ctrl.hmpre) {
		result = nvme_setup_host_mem(dev);
		if (result < 0)
			goto out;
	}

	result = nvme_setup_io_queues(dev);
	if (result)
		goto out;

	/*
	 * Keep the controller around but remove all namespaces if we don't have
	 * any working I/O queue.
	 */
	if (dev->online_queues < 2) {
		dev_warn(dev->ctrl.device, "IO queues not created\n");
		nvme_kill_queues(&dev->ctrl);
		nvme_remove_namespaces(&dev->ctrl);
		new_state = NVME_CTRL_ADMIN_ONLY;
	} else {
		nvme_start_queues(&dev->ctrl);
		nvme_wait_freeze(&dev->ctrl);
		/* hit this only when the tagset allocation fails */
		if (nvme_dev_add(dev))
			new_state = NVME_CTRL_ADMIN_ONLY;
		nvme_unfreeze(&dev->ctrl);
	}

	/*
	 * If only the admin queue is live, keep it for further investigation
	 * or recovery.
	 */
	if (!nvme_change_ctrl_state(&dev->ctrl, new_state)) {
		dev_warn(dev->ctrl.device,
			"failed to mark controller state %d\n", new_state);
		goto out;
	}

	nvme_start_ctrl(&dev->ctrl);
	return;

 out:
	nvme_remove_dead_ctrl(dev, result);
}

static void nvme_remove_dead_ctrl_work(struct work_struct *work)
{
	struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	nvme_kill_queues(&dev->ctrl);
	if (pci_get_drvdata(pdev))
		device_release_driver(&pdev->dev);
	nvme_put_ctrl(&dev->ctrl);
}

static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
	*val = readl(to_nvme_dev(ctrl)->bar + off);
	return 0;
}

static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{
	writel(val, to_nvme_dev(ctrl)->bar + off);
	return 0;
}

static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
	*val = readq(to_nvme_dev(ctrl)->bar + off);
	return 0;
}

static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
	.name			= "pcie",
	.module			= THIS_MODULE,
	.flags			= NVME_F_METADATA_SUPPORTED,
	.reg_read32		= nvme_pci_reg_read32,
	.reg_write32		= nvme_pci_reg_write32,
	.reg_read64		= nvme_pci_reg_read64,
	.free_ctrl		= nvme_pci_free_ctrl,
	.submit_async_event	= nvme_pci_submit_async_event,
};

static int nvme_dev_map(struct nvme_dev *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pci_request_mem_regions(pdev, "nvme"))
		return -ENODEV;

	if (nvme_remap_bar(dev, NVME_REG_DBS + 4096))
		goto release;

	return 0;
  release:
	pci_release_mem_regions(pdev);
	return -ENODEV;
}

static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
{
	if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
		/*
		 * Several Samsung devices seem to drop off the PCIe bus
		 * randomly when APST is on and uses the deepest sleep state.
		 * This has been observed on a Samsung "SM951 NVMe SAMSUNG
		 * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD
		 * 950 PRO 256GB", but it seems to be restricted to two Dell
		 * laptops.
		 */
		if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") &&
		    (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
		     dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
			return NVME_QUIRK_NO_DEEPEST_PS;
	} else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
		/*
		 * Samsung SSD 960 EVO drops off the PCIe bus after system
		 * suspend on a Ryzen board, ASUS PRIME B350M-A.
		 */
		if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
		    dmi_match(DMI_BOARD_NAME, "PRIME B350M-A"))
			return NVME_QUIRK_NO_APST;
	}

	return 0;
}

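/*
 * PCI probe entry point: allocate the per-controller state, map the BARs,
 * create the PRP DMA pools, register with the NVMe core and kick off the
 * initial controller reset to bring the device online asynchronously.
 */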
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int node, result = -ENOMEM;
	struct nvme_dev *dev;
	unsigned long quirks = id->driver_data;

	node = dev_to_node(&pdev->dev);
	if (node == NUMA_NO_NODE)
		set_dev_node(&pdev->dev, first_memory_node);

	dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
	if (!dev)
		return -ENOMEM;

	dev->queues = kcalloc_node(num_possible_cpus() + 1,
			sizeof(struct nvme_queue), GFP_KERNEL, node);
	if (!dev->queues)
		goto free;

	dev->dev = get_device(&pdev->dev);
	pci_set_drvdata(pdev, dev);

	result = nvme_dev_map(dev);
	if (result)
		goto put_pci;

	INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
	INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
	mutex_init(&dev->shutdown_lock);
	init_completion(&dev->ioq_wait);

	result = nvme_setup_prp_pools(dev);
	if (result)
		goto unmap;

	quirks |= check_vendor_combination_bug(pdev);

	result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
			quirks);
	if (result)
		goto release_pools;

	dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));

	nvme_reset_ctrl(&dev->ctrl);

	return 0;

 release_pools:
	nvme_release_prp_pools(dev);
 unmap:
	nvme_dev_unmap(dev);
 put_pci:
	put_device(dev->dev);
 free:
	kfree(dev->queues);
	kfree(dev);
	return result;
}

static void nvme_reset_prepare(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	nvme_dev_disable(dev, false);
}

static void nvme_reset_done(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	nvme_reset_ctrl_sync(&dev->ctrl);
}

static void nvme_shutdown(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	nvme_dev_disable(dev, true);
}

/*
 * The driver's remove may be called on a device in a partially initialized
 * state. This function must not have any dependencies on the device state in
 * order to proceed.
 */
static void nvme_remove(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);

	cancel_work_sync(&dev->ctrl.reset_work);
	pci_set_drvdata(pdev, NULL);

	if (!pci_device_is_present(pdev)) {
		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
		nvme_dev_disable(dev, false);
	}

	flush_work(&dev->ctrl.reset_work);
	nvme_stop_ctrl(&dev->ctrl);
	nvme_remove_namespaces(&dev->ctrl);
	nvme_dev_disable(dev, true);
	nvme_free_host_mem(dev);
	nvme_dev_remove_admin(dev);
	nvme_free_queues(dev, 0);
	nvme_uninit_ctrl(&dev->ctrl);
	nvme_release_prp_pools(dev);
	nvme_dev_unmap(dev);
	nvme_put_ctrl(&dev->ctrl);
}

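/*
 * PCI sriov_configure callback (sysfs sriov_numvfs): enable the requested
 * number of virtual functions, or disable SR-IOV when numvfs is zero and no
 * VFs are still assigned.
 */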
static int nvme_pci_sriov_configure(struct pci_dev *pdev, int numvfs)
{
	int ret = 0;

	if (numvfs == 0) {
		if (pci_vfs_assigned(pdev)) {
			dev_warn(&pdev->dev,
				"Cannot disable SR-IOV VFs while assigned\n");
			return -EPERM;
		}
		pci_disable_sriov(pdev);
		return 0;
	}

	ret = pci_enable_sriov(pdev, numvfs);
	return ret ? ret : numvfs;
}

#ifdef CONFIG_PM_SLEEP
static int nvme_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct nvme_dev *ndev = pci_get_drvdata(pdev);

	nvme_dev_disable(ndev, true);
	return 0;
}

static int nvme_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct nvme_dev *ndev = pci_get_drvdata(pdev);

	nvme_reset_ctrl(&ndev->ctrl);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);

static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	/*
	 * A frozen channel requires a reset. When detected, this method will
	 * shutdown the controller to quiesce. The controller will be restarted
	 * after the slot reset through driver's slot_reset callback.
	 */
	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		dev_warn(dev->ctrl.device,
			"frozen state error detected, reset controller\n");
		nvme_dev_disable(dev, false);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_warn(dev->ctrl.device,
			"failure state error detected, request disconnect\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	dev_info(dev->ctrl.device, "restart after slot reset\n");
	pci_restore_state(pdev);
	nvme_reset_ctrl(&dev->ctrl);
	return PCI_ERS_RESULT_RECOVERED;
}

static void nvme_error_resume(struct pci_dev *pdev)
{
	pci_cleanup_aer_uncorrect_error_status(pdev);
}

static const struct pci_error_handlers nvme_err_handler = {
	.error_detected	= nvme_error_detected,
	.slot_reset	= nvme_slot_reset,
	.resume		= nvme_error_resume,
	.reset_prepare	= nvme_reset_prepare,
	.reset_done	= nvme_reset_done,
};

static const struct pci_device_id nvme_id_table[] = {
	{ PCI_VDEVICE(INTEL, 0x0953),
		.driver_data = NVME_QUIRK_STRIPE_SIZE |
				NVME_QUIRK_DEALLOCATE_ZEROES, },
	{ PCI_VDEVICE(INTEL, 0x0a53),
		.driver_data = NVME_QUIRK_STRIPE_SIZE |
				NVME_QUIRK_DEALLOCATE_ZEROES, },
	{ PCI_VDEVICE(INTEL, 0x0a54),
		.driver_data = NVME_QUIRK_STRIPE_SIZE |
				NVME_QUIRK_DEALLOCATE_ZEROES, },
	{ PCI_VDEVICE(INTEL, 0x0a55),
		.driver_data = NVME_QUIRK_STRIPE_SIZE |
				NVME_QUIRK_DEALLOCATE_ZEROES, },
	{ PCI_VDEVICE(INTEL, 0xf1a5),	/* Intel 600P/P3100 */
		.driver_data = NVME_QUIRK_NO_DEEPEST_PS },
	{ PCI_VDEVICE(INTEL, 0x5845),	/* Qemu emulated controller */
		.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
	{ PCI_DEVICE(0x1c58, 0x0003),	/* HGST adapter */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x1c58, 0x0023),	/* WDC SN200 adapter */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x1c5f, 0x0540),	/* Memblaze Pblaze4 adapter */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x144d, 0xa821),   /* Samsung PM1725 */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x144d, 0xa822),   /* Samsung PM1725a */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x1d1d, 0x1f1f),	/* LightNVM qemu device */
		.driver_data = NVME_QUIRK_LIGHTNVM, },
	{ PCI_DEVICE(0x1d1d, 0x2807),	/* CNEX WL */
		.driver_data = NVME_QUIRK_LIGHTNVM, },
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);

static struct pci_driver nvme_driver = {
	.name		= "nvme",
	.id_table	= nvme_id_table,
	.probe		= nvme_probe,
	.remove		= nvme_remove,
	.shutdown	= nvme_shutdown,
	.driver		= {
		.pm	= &nvme_dev_pm_ops,
	},
	.sriov_configure = nvme_pci_sriov_configure,
	.err_handler	= &nvme_err_handler,
};

static int __init nvme_init(void)
{
	return pci_register_driver(&nvme_driver);
}

static void __exit nvme_exit(void)
{
	pci_unregister_driver(&nvme_driver);
	flush_workqueue(nvme_wq);
	_nvme_check_size();
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_init);
module_exit(nvme_exit);