/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/t10-pi.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <asm/unaligned.h>
#include <linux/sed-opal.h>

#include "nvme.h"

#define NVME_Q_DEPTH		1024
#define NVME_AQ_DEPTH		256
#define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))

/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_AQ_BLKMQ_DEPTH	(NVME_AQ_DEPTH - NVME_NR_AERS)

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static bool use_cmb_sqes = true;
module_param(use_cmb_sqes, bool, 0644);
MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");

static struct workqueue_struct *nvme_workq;

struct nvme_dev;
struct nvme_queue;

static int nvme_reset(struct nvme_dev *dev);
static void nvme_process_cq(struct nvme_queue *nvmeq);
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct nvme_queue **queues;
	struct blk_mq_tag_set tagset;
	struct blk_mq_tag_set admin_tagset;
	u32 __iomem *dbs;
	struct device *dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	unsigned queue_count;
	unsigned online_queues;
	unsigned max_qid;
	int q_depth;
	u32 db_stride;
	void __iomem *bar;
	struct work_struct reset_work;
	struct work_struct remove_work;
	struct timer_list watchdog_timer;
	struct mutex shutdown_lock;
	bool subsystem;
	void __iomem *cmb;
	dma_addr_t cmb_dma_addr;
	u64 cmb_size;
	u32 cmbsz;
	u32 cmbloc;
	struct nvme_ctrl ctrl;
	struct completion ioq_wait;
};

static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_dev, ctrl);
}

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct device *q_dmadev;
	struct nvme_dev *dev;
	char irqname[24];	/* nvme4294967295-65535\0 */
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	struct nvme_command __iomem *sq_cmds_io;
	volatile struct nvme_completion *cqes;
	struct blk_mq_tags **tags;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	u32 __iomem *q_db;
	u16 q_depth;
	s16 cq_vector;
	u16 sq_tail;
	u16 cq_head;
	u16 qid;
	u8 cq_phase;
	u8 cqe_seen;
};

/*
 * The nvme_iod describes the data in an I/O, including the list of PRP
 * entries.  You can't see it in this data structure because C doesn't let
 * me express that.  Use nvme_init_iod to ensure there's enough space
 * allocated to store the PRP list.
 */
struct nvme_iod {
	struct nvme_request req;
	struct nvme_queue *nvmeq;
	int aborted;
	int npages;		/* In the PRP list. 0 means small pool in use */
	int nents;		/* Used in scatterlist */
	int length;		/* Of data, in bytes */
	dma_addr_t first_dma;
	struct scatterlist meta_sg; /* metadata requires single contiguous buffer */
	struct scatterlist *sg;
	struct scatterlist inline_sg[0];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
}

/*
 * Max size of iod being embedded in the request payload
 */
#define NVME_INT_PAGES		2
#define NVME_INT_BYTES(dev)	(NVME_INT_PAGES * (dev)->ctrl.page_size)

/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_npages(unsigned size, struct nvme_dev *dev)
{
	unsigned nprps = DIV_ROUND_UP(size + dev->ctrl.page_size,
				      dev->ctrl.page_size);
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}

static unsigned int nvme_iod_alloc_size(struct nvme_dev *dev,
		unsigned int size, unsigned int nseg)
{
	return sizeof(__le64 *) * nvme_npages(size, dev) +
			sizeof(struct scatterlist) * nseg;
}

static unsigned int nvme_cmd_size(struct nvme_dev *dev)
{
	return sizeof(struct nvme_iod) +
		nvme_iod_alloc_size(dev, NVME_INT_BYTES(dev), NVME_INT_PAGES);
}

static int nvmeq_irq(struct nvme_queue *nvmeq)
{
	return pci_irq_vector(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector);
}

static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = dev->queues[0];

	WARN_ON(hctx_idx != 0);
	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
	WARN_ON(nvmeq->tags);

	hctx->driver_data = nvmeq;
	nvmeq->tags = &dev->admin_tagset.tags[0];
	return 0;
}

static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	nvmeq->tags = NULL;
}

static int nvme_admin_init_request(void *data, struct request *req,
				unsigned int hctx_idx, unsigned int rq_idx,
				unsigned int numa_node)
{
	struct nvme_dev *dev = data;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = dev->queues[0];

	BUG_ON(!nvmeq);
	iod->nvmeq = nvmeq;
	return 0;
}

static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];

	if (!nvmeq->tags)
		nvmeq->tags = &dev->tagset.tags[hctx_idx];

	WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
	hctx->driver_data = nvmeq;
	return 0;
}

static int nvme_init_request(void *data, struct request *req,
				unsigned int hctx_idx, unsigned int rq_idx,
				unsigned int numa_node)
{
	struct nvme_dev *dev = data;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];

	BUG_ON(!nvmeq);
	iod->nvmeq = nvmeq;
	return 0;
}

static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_dev *dev = set->driver_data;

	return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev));
}

/**
 * __nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
						struct nvme_command *cmd)
{
	u16 tail = nvmeq->sq_tail;

	if (nvmeq->sq_cmds_io)
		memcpy_toio(&nvmeq->sq_cmds_io[tail], cmd, sizeof(*cmd));
	else
		memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));

	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
}

static __le64 **iod_list(struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req));
}

static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
	int nseg = blk_rq_nr_phys_segments(rq);
	unsigned int size = blk_rq_payload_bytes(rq);

	if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
		iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
		if (!iod->sg)
			return BLK_MQ_RQ_QUEUE_BUSY;
	} else {
		iod->sg = iod->inline_sg;
	}

	iod->aborted = 0;
	iod->npages = -1;
	iod->nents = 0;
	iod->length = size;

	if (!(rq->rq_flags & RQF_DONTPREP)) {
		rq->retries = 0;
		rq->rq_flags |= RQF_DONTPREP;
	}
	return BLK_MQ_RQ_QUEUE_OK;
}

static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	const int last_prp = dev->ctrl.page_size / 8 - 1;
	int i;
	__le64 **list = iod_list(req);
	dma_addr_t prp_dma = iod->first_dma;

	if (iod->npages == 0)
		dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
	for (i = 0; i < iod->npages; i++) {
		__le64 *prp_list = list[i];
		dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
		dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
		prp_dma = next_prp_dma;
	}

	if (iod->sg != iod->inline_sg)
		kfree(iod->sg);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
{
	if (be32_to_cpu(pi->ref_tag) == v)
		pi->ref_tag = cpu_to_be32(p);
}

static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
{
	if (be32_to_cpu(pi->ref_tag) == p)
		pi->ref_tag = cpu_to_be32(v);
}

/**
 * nvme_dif_remap - remaps ref tags to bip seed and physical lba
 *
 * The virtual start sector is the one that was originally submitted by the
 * block layer.	Due to partitioning, MD/DM cloning, etc. the actual physical
 * start sector may be different. Remap protection information to match the
 * physical LBA on writes, and back to the original seed on reads.
 *
 * Type 0 and 3 do not have a ref tag, so no remapping required.
 */
static void nvme_dif_remap(struct request *req,
			void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
{
	struct nvme_ns *ns = req->rq_disk->private_data;
	struct bio_integrity_payload *bip;
	struct t10_pi_tuple *pi;
	void *p, *pmap;
	u32 i, nlb, ts, phys, virt;

	if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3)
		return;

	bip = bio_integrity(req->bio);
	if (!bip)
		return;

	pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;

	p = pmap;
	virt = bip_get_seed(bip);
	phys = nvme_block_nr(ns, blk_rq_pos(req));
	nlb = (blk_rq_bytes(req) >> ns->lba_shift);
	ts = ns->disk->queue->integrity.tuple_size;

	for (i = 0; i < nlb; i++, virt++, phys++) {
		pi = (struct t10_pi_tuple *)p;
		dif_swap(phys, virt, pi);
		p += ts;
	}
	kunmap_atomic(pmap);
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
static void nvme_dif_remap(struct request *req,
			void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
{
}
static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
{
}
static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
{
}
#endif

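/*
 * Walk the request's DMA-mapped scatterlist and build the PRP list for it:
 * the first data pointer stays inline, and any overflow is chained through
 * pages taken from the per-device PRP DMA pools.  Returns false if a PRP
 * list page could not be allocated.
 */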
static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	int length = blk_rq_payload_bytes(req);
	struct scatterlist *sg = iod->sg;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	u32 page_size = dev->ctrl.page_size;
	int offset = dma_addr & (page_size - 1);
	__le64 *prp_list;
	__le64 **list = iod_list(req);
	dma_addr_t prp_dma;
	int nprps, i;

	length -= (page_size - offset);
	if (length <= 0)
		return true;

	dma_len -= (page_size - offset);
	if (dma_len) {
		dma_addr += (page_size - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= page_size) {
		iod->first_dma = dma_addr;
		return true;
	}

	nprps = DIV_ROUND_UP(length, page_size);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
	if (!prp_list) {
		iod->first_dma = dma_addr;
		iod->npages = -1;
		return false;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;
	i = 0;
	for (;;) {
		if (i == page_size >> 3) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
			if (!prp_list)
				return false;
			list[iod->npages++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= page_size;
		dma_addr += page_size;
		length -= page_size;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		BUG_ON(dma_len < 0);
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	return true;
}

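/*
 * DMA-map the request's scatter/gather list (and any integrity metadata) and
 * fill in the command's PRP data pointers.
 */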
static int nvme_map_data(struct nvme_dev *dev, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct request_queue *q = req->q;
	enum dma_data_direction dma_dir = rq_data_dir(req) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;
	int ret = BLK_MQ_RQ_QUEUE_ERROR;

	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
	iod->nents = blk_rq_map_sg(q, req, iod->sg);
	if (!iod->nents)
		goto out;

	ret = BLK_MQ_RQ_QUEUE_BUSY;
	if (!dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
				DMA_ATTR_NO_WARN))
		goto out;

	if (!nvme_setup_prps(dev, req))
		goto out_unmap;

	ret = BLK_MQ_RQ_QUEUE_ERROR;
	if (blk_integrity_rq(req)) {
		if (blk_rq_count_integrity_sg(q, req->bio) != 1)
			goto out_unmap;

		sg_init_table(&iod->meta_sg, 1);
		if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1)
			goto out_unmap;

		if (rq_data_dir(req))
			nvme_dif_remap(req, nvme_dif_prep);

		if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
			goto out_unmap;
	}

	cmnd->rw.dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
	cmnd->rw.dptr.prp2 = cpu_to_le64(iod->first_dma);
	if (blk_integrity_rq(req))
		cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
	return BLK_MQ_RQ_QUEUE_OK;

out_unmap:
	dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
out:
	return ret;
}

static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	enum dma_data_direction dma_dir = rq_data_dir(req) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;

	if (iod->nents) {
		dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
		if (blk_integrity_rq(req)) {
			if (!rq_data_dir(req))
				nvme_dif_remap(req, nvme_dif_complete);
			dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
		}
	}

	nvme_cleanup_cmd(req);
	nvme_free_iod(dev, req);
}

/*
 * NOTE: ns is NULL when called on the admin queue.
 */
static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_queue *nvmeq = hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *req = bd->rq;
	struct nvme_command cmnd;
	int ret = BLK_MQ_RQ_QUEUE_OK;

	/*
	 * If formatted with metadata, require that the block layer provide a
	 * buffer unless this namespace is formatted such that the metadata can be
	 * stripped/generated by the controller with PRACT=1.
	 */
	if (ns && ns->ms && !blk_integrity_rq(req)) {
		if (!(ns->pi_type && ns->ms == 8) &&
		    !blk_rq_is_passthrough(req)) {
			blk_mq_end_request(req, -EFAULT);
			return BLK_MQ_RQ_QUEUE_OK;
		}
	}

	ret = nvme_setup_cmd(ns, req, &cmnd);
	if (ret != BLK_MQ_RQ_QUEUE_OK)
		return ret;

	ret = nvme_init_iod(req, dev);
	if (ret != BLK_MQ_RQ_QUEUE_OK)
		goto out_free_cmd;

	if (blk_rq_nr_phys_segments(req))
		ret = nvme_map_data(dev, req, &cmnd);

	if (ret != BLK_MQ_RQ_QUEUE_OK)
		goto out_cleanup_iod;

	blk_mq_start_request(req);

	spin_lock_irq(&nvmeq->q_lock);
	if (unlikely(nvmeq->cq_vector < 0)) {
		ret = BLK_MQ_RQ_QUEUE_ERROR;
		spin_unlock_irq(&nvmeq->q_lock);
		goto out_cleanup_iod;
	}
	__nvme_submit_cmd(nvmeq, &cmnd);
	nvme_process_cq(nvmeq);
	spin_unlock_irq(&nvmeq->q_lock);
	return BLK_MQ_RQ_QUEUE_OK;
out_cleanup_iod:
	nvme_free_iod(dev, req);
out_free_cmd:
	nvme_cleanup_cmd(req);
	return ret;
}

static void nvme_complete_rq(struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_dev *dev = iod->nvmeq->dev;
	int error = 0;

	nvme_unmap_data(dev, req);

	if (unlikely(req->errors)) {
		if (nvme_req_needs_retry(req, req->errors)) {
			req->retries++;
			nvme_requeue_req(req);
			return;
		}

		if (blk_rq_is_passthrough(req))
			error = req->errors;
		else
			error = nvme_error_status(req->errors);
	}

	if (unlikely(iod->aborted)) {
		dev_warn(dev->ctrl.device,
			"completing aborted command with status: %04x\n",
			req->errors);
	}

	blk_mq_end_request(req, error);
}

/* We read the CQE phase first to check if the rest of the entry is valid */
static inline bool nvme_cqe_valid(struct nvme_queue *nvmeq, u16 head,
		u16 phase)
{
	return (le16_to_cpu(nvmeq->cqes[head].status) & 1) == phase;
}

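/*
 * Drain all valid completion entries, completing the corresponding requests
 * and ringing the completion queue doorbell.  If @tag is non-NULL it is set
 * to -1 when a matching command id is seen (used by the poll path).
 */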
static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
{
	u16 head, phase;

	head = nvmeq->cq_head;
	phase = nvmeq->cq_phase;

	while (nvme_cqe_valid(nvmeq, head, phase)) {
		struct nvme_completion cqe = nvmeq->cqes[head];
		struct request *req;

		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}

		if (tag && *tag == cqe.command_id)
			*tag = -1;

		if (unlikely(cqe.command_id >= nvmeq->q_depth)) {
			dev_warn(nvmeq->dev->ctrl.device,
				"invalid id %d completed on queue %d\n",
				cqe.command_id, le16_to_cpu(cqe.sq_id));
			continue;
		}

		/*
		 * AEN requests are special as they don't time out and can
		 * survive any kind of queue freeze and often don't respond to
		 * aborts.  We don't even bother to allocate a struct request
		 * for them but rather special case them here.
		 */
		if (unlikely(nvmeq->qid == 0 &&
				cqe.command_id >= NVME_AQ_BLKMQ_DEPTH)) {
			nvme_complete_async_event(&nvmeq->dev->ctrl,
					cqe.status, &cqe.result);
			continue;
		}

		req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
		nvme_req(req)->result = cqe.result;
		blk_mq_complete_request(req, le16_to_cpu(cqe.status) >> 1);
	}

	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
		return;

	if (likely(nvmeq->cq_vector >= 0))
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	nvmeq->cqe_seen = 1;
}

static void nvme_process_cq(struct nvme_queue *nvmeq)
{
	__nvme_process_cq(nvmeq, NULL);
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	irqreturn_t result;
	struct nvme_queue *nvmeq = data;
	spin_lock(&nvmeq->q_lock);
	nvme_process_cq(nvmeq);
	result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE;
	nvmeq->cqe_seen = 0;
	spin_unlock(&nvmeq->q_lock);
	return result;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase))
		return IRQ_WAKE_THREAD;
	return IRQ_NONE;
}

static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) {
		spin_lock_irq(&nvmeq->q_lock);
		__nvme_process_cq(nvmeq, &tag);
		spin_unlock_irq(&nvmeq->q_lock);

		if (tag == -1)
			return 1;
	}

	return 0;
}

static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
{
	struct nvme_dev *dev = to_nvme_dev(ctrl);
	struct nvme_queue *nvmeq = dev->queues[0];
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_async_event;
	c.common.command_id = NVME_AQ_BLKMQ_DEPTH + aer_idx;

	spin_lock_irq(&nvmeq->q_lock);
	__nvme_submit_cmd(nvmeq, &c);
	spin_unlock_irq(&nvmeq->q_lock);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	/*
	 * Note: we (ab)use the fact that the prp fields survive if no data
	 * is attached to the request.
	 */
	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	/*
	 * Note: we (ab)use the fact that the prp fields survive if no data
	 * is attached to the request.
	 */
	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static void abort_endio(struct request *req, int error)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = iod->nvmeq;
	u16 status = req->errors;

	dev_warn(nvmeq->dev->ctrl.device, "Abort status: 0x%x", status);
	atomic_inc(&nvmeq->dev->ctrl.abort_limit);
	blk_mq_free_request(req);
}

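/*
 * blk-mq timeout handler: depending on controller state, either shut the
 * device down (completing everything inline), escalate to a full controller
 * reset, or issue a single Abort command for the stuck request.
 */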
static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = iod->nvmeq;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *abort_req;
	struct nvme_command cmd;

	/*
	 * Shutdown immediately if controller times out while starting. The
	 * reset work will see the pci device disabled when it gets the forced
	 * cancellation error. All outstanding requests are completed on
	 * shutdown, so we return BLK_EH_HANDLED.
	 */
	if (dev->ctrl.state == NVME_CTRL_RESETTING) {
		dev_warn(dev->ctrl.device,
			 "I/O %d QID %d timeout, disable controller\n",
			 req->tag, nvmeq->qid);
		nvme_dev_disable(dev, false);
		req->errors = NVME_SC_CANCELLED;
		return BLK_EH_HANDLED;
	}

	/*
	 * Shutdown the controller immediately and schedule a reset if the
	 * command was already aborted once before and still hasn't been
	 * returned to the driver, or if this is the admin queue.
	 */
	if (!nvmeq->qid || iod->aborted) {
		dev_warn(dev->ctrl.device,
			 "I/O %d QID %d timeout, reset controller\n",
			 req->tag, nvmeq->qid);
		nvme_dev_disable(dev, false);
		nvme_reset(dev);

		/*
		 * Mark the request as handled, since the inline shutdown
		 * forces all outstanding requests to complete.
		 */
		req->errors = NVME_SC_CANCELLED;
		return BLK_EH_HANDLED;
	}

	if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
		atomic_inc(&dev->ctrl.abort_limit);
		return BLK_EH_RESET_TIMER;
	}
	iod->aborted = 1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.abort.opcode = nvme_admin_abort_cmd;
	cmd.abort.cid = req->tag;
	cmd.abort.sqid = cpu_to_le16(nvmeq->qid);

	dev_warn(nvmeq->dev->ctrl.device,
		"I/O %d QID %d timeout, aborting\n",
		 req->tag, nvmeq->qid);

	abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
			BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
	if (IS_ERR(abort_req)) {
		atomic_inc(&dev->ctrl.abort_limit);
		return BLK_EH_RESET_TIMER;
	}

	abort_req->timeout = ADMIN_TIMEOUT;
	abort_req->end_io_data = NULL;
	blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);
	/*
	 * The aborted req will be completed on receiving the abort req.
	 * We enable the timer again. If hit twice, it'll cause a device reset,
	 * as the device then is in a faulty state.
	 */
	return BLK_EH_RESET_TIMER;
}

static void nvme_free_queue(struct nvme_queue *nvmeq)
{
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	if (nvmeq->sq_cmds)
		dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
}

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
	int i;

	for (i = dev->queue_count - 1; i >= lowest; i--) {
		struct nvme_queue *nvmeq = dev->queues[i];
		dev->queue_count--;
		dev->queues[i] = NULL;
		nvme_free_queue(nvmeq);
	}
}

/**
 * nvme_suspend_queue - put queue into suspended state
 * @nvmeq - queue to suspend
 */
static int nvme_suspend_queue(struct nvme_queue *nvmeq)
{
	int vector;

	spin_lock_irq(&nvmeq->q_lock);
	if (nvmeq->cq_vector == -1) {
		spin_unlock_irq(&nvmeq->q_lock);
		return 1;
	}
	vector = nvmeq_irq(nvmeq);
	nvmeq->dev->online_queues--;
	nvmeq->cq_vector = -1;
	spin_unlock_irq(&nvmeq->q_lock);

	if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
		blk_mq_stop_hw_queues(nvmeq->dev->ctrl.admin_q);

	free_irq(vector, nvmeq);

	return 0;
}
static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
{
	struct nvme_queue *nvmeq = dev->queues[0];

	if (!nvmeq)
		return;
	if (nvme_suspend_queue(nvmeq))
		return;

	if (shutdown)
		nvme_shutdown_ctrl(&dev->ctrl);
	else
		nvme_disable_ctrl(&dev->ctrl, lo_hi_readq(
						dev->bar + NVME_REG_CAP));

	spin_lock_irq(&nvmeq->q_lock);
	nvme_process_cq(nvmeq);
	spin_unlock_irq(&nvmeq->q_lock);
}

static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
				int entry_size)
{
	int q_depth = dev->q_depth;
	unsigned q_size_aligned = roundup(q_depth * entry_size,
					  dev->ctrl.page_size);

	if (q_size_aligned * nr_io_queues > dev->cmb_size) {
		u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
		mem_per_q = round_down(mem_per_q, dev->ctrl.page_size);
		q_depth = div_u64(mem_per_q, entry_size);

		/*
		 * Ensure the reduced q_depth is above some threshold where it
		 * would be better to map queues in system memory with the
		 * original depth
		 */
		if (q_depth < 64)
			return -ENOMEM;
	}

	return q_depth;
}

static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
				int qid, int depth)
{
	if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
		unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
						      dev->ctrl.page_size);
		nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset;
		nvmeq->sq_cmds_io = dev->cmb + offset;
	} else {
		nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
					&nvmeq->sq_dma_addr, GFP_KERNEL);
		if (!nvmeq->sq_cmds)
			return -ENOMEM;
	}

	return 0;
}

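/*
 * Allocate and initialise a queue pair (placing the SQ in the controller
 * memory buffer when available, otherwise in host memory) without yet
 * registering it with the controller.
 */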
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
							int depth)
{
	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL);
	if (!nvmeq)
		return NULL;

	nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
					  &nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;

	if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth))
		goto free_cqdma;

	nvmeq->q_dmadev = dev->dev;
	nvmeq->dev = dev;
	snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
			dev->ctrl.instance, qid);
	spin_lock_init(&nvmeq->q_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->q_depth = depth;
	nvmeq->qid = qid;
	nvmeq->cq_vector = -1;
	dev->queues[qid] = nvmeq;
	dev->queue_count++;

	return nvmeq;

 free_cqdma:
	dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
							nvmeq->cq_dma_addr);
 free_nvmeq:
	kfree(nvmeq);
	return NULL;
}

static int queue_request_irq(struct nvme_queue *nvmeq)
{
	if (use_threaded_interrupts)
		return request_threaded_irq(nvmeq_irq(nvmeq), nvme_irq_check,
				nvme_irq, IRQF_SHARED, nvmeq->irqname, nvmeq);
	else
		return request_irq(nvmeq_irq(nvmeq), nvme_irq, IRQF_SHARED,
				nvmeq->irqname, nvmeq);
}

static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
	struct nvme_dev *dev = nvmeq->dev;

	spin_lock_irq(&nvmeq->q_lock);
	nvmeq->sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
	dev->online_queues++;
	spin_unlock_irq(&nvmeq->q_lock);
}

static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
{
	struct nvme_dev *dev = nvmeq->dev;
	int result;

	nvmeq->cq_vector = qid - 1;
	result = adapter_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		return result;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = queue_request_irq(nvmeq);
	if (result < 0)
		goto release_sq;

	nvme_init_queue(nvmeq, qid);
	return result;

 release_sq:
	adapter_delete_sq(dev, qid);
 release_cq:
	adapter_delete_cq(dev, qid);
	return result;
}

static struct blk_mq_ops nvme_mq_admin_ops = {
	.queue_rq	= nvme_queue_rq,
	.complete	= nvme_complete_rq,
	.init_hctx	= nvme_admin_init_hctx,
	.exit_hctx      = nvme_admin_exit_hctx,
	.init_request	= nvme_admin_init_request,
	.timeout	= nvme_timeout,
};

static struct blk_mq_ops nvme_mq_ops = {
	.queue_rq	= nvme_queue_rq,
	.complete	= nvme_complete_rq,
	.init_hctx	= nvme_init_hctx,
	.init_request	= nvme_init_request,
	.map_queues	= nvme_pci_map_queues,
	.timeout	= nvme_timeout,
	.poll		= nvme_poll,
};

static void nvme_dev_remove_admin(struct nvme_dev *dev)
{
	if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
		/*
		 * If the controller was reset during removal, it's possible
		 * user requests may be waiting on a stopped queue. Start the
		 * queue to flush these to completion.
		 */
		blk_mq_start_stopped_hw_queues(dev->ctrl.admin_q, true);
		blk_cleanup_queue(dev->ctrl.admin_q);
		blk_mq_free_tag_set(&dev->admin_tagset);
	}
}

static int nvme_alloc_admin_tags(struct nvme_dev *dev)
{
	if (!dev->ctrl.admin_q) {
		dev->admin_tagset.ops = &nvme_mq_admin_ops;
		dev->admin_tagset.nr_hw_queues = 1;

		/*
		 * Subtract one to leave an empty queue entry for 'Full Queue'
		 * condition. See NVM-Express 1.2 specification, section 4.1.2.
		 */
		dev->admin_tagset.queue_depth = NVME_AQ_BLKMQ_DEPTH - 1;
		dev->admin_tagset.timeout = ADMIN_TIMEOUT;
		dev->admin_tagset.numa_node = dev_to_node(dev->dev);
		dev->admin_tagset.cmd_size = nvme_cmd_size(dev);
		dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
		dev->admin_tagset.driver_data = dev;

		if (blk_mq_alloc_tag_set(&dev->admin_tagset))
			return -ENOMEM;

		dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
		if (IS_ERR(dev->ctrl.admin_q)) {
			blk_mq_free_tag_set(&dev->admin_tagset);
			return -ENOMEM;
		}
		if (!blk_get_queue(dev->ctrl.admin_q)) {
			nvme_dev_remove_admin(dev);
			dev->ctrl.admin_q = NULL;
			return -ENODEV;
		}
	} else
		blk_mq_start_stopped_hw_queues(dev->ctrl.admin_q, true);

	return 0;
}

static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
	struct nvme_queue *nvmeq;

	dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
						NVME_CAP_NSSRC(cap) : 0;

	if (dev->subsystem &&
	    (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO))
		writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS);

	result = nvme_disable_ctrl(&dev->ctrl, cap);
	if (result < 0)
		return result;

	nvmeq = dev->queues[0];
	if (!nvmeq) {
		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
		if (!nvmeq)
			return -ENOMEM;
	}

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	writel(aqa, dev->bar + NVME_REG_AQA);
	lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
	lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);

	result = nvme_enable_ctrl(&dev->ctrl, cap);
	if (result)
		return result;

	nvmeq->cq_vector = 0;
	result = queue_request_irq(nvmeq);
	if (result) {
		nvmeq->cq_vector = -1;
		return result;
	}

	return result;
}

static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
{

	/* If true, indicates loss of adapter communication, possibly by a
	 * NVMe Subsystem reset.
	 */
	bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);

	/* If there is a reset ongoing, we shouldn't reset again. */
	if (work_busy(&dev->reset_work))
		return false;

	/* We shouldn't reset unless the controller is on fatal error state
	 * _or_ if we lost the communication with it.
	 */
	if (!(csts & NVME_CSTS_CFS) && !nssro)
		return false;

	/* If PCI error recovery process is happening, we cannot reset or
	 * the recovery mechanism will surely fail.
	 */
	if (pci_channel_offline(to_pci_dev(dev->dev)))
		return false;

	return true;
}

static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
{
	/* Read a config register to help see what died. */
	u16 pci_status;
	int result;

	result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
				      &pci_status);
	if (result == PCIBIOS_SUCCESSFUL)
		dev_warn(dev->dev,
			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
			 csts, pci_status);
	else
		dev_warn(dev->dev,
			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
			 csts, result);
}

static void nvme_watchdog_timer(unsigned long data)
{
	struct nvme_dev *dev = (struct nvme_dev *)data;
	u32 csts = readl(dev->bar + NVME_REG_CSTS);

	/* Skip controllers under certain specific conditions. */
	if (nvme_should_reset(dev, csts)) {
		if (!nvme_reset(dev))
			nvme_warn_reset(dev, csts);
		return;
	}

	mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));
}

static int nvme_create_io_queues(struct nvme_dev *dev)
{
	unsigned i, max;
	int ret = 0;

	for (i = dev->queue_count; i <= dev->max_qid; i++) {
		if (!nvme_alloc_queue(dev, i, dev->q_depth)) {
			ret = -ENOMEM;
			break;
		}
	}

	max = min(dev->max_qid, dev->queue_count - 1);
	for (i = dev->online_queues; i <= max; i++) {
		ret = nvme_create_queue(dev->queues[i], i);
		if (ret)
			break;
	}

	/*
	 * Ignore failing Create SQ/CQ commands, we can continue with less
	 * than the desired amount of queues, and even a controller without
	 * I/O queues can still be used to issue admin commands.  This might
	 * be useful to upgrade a buggy firmware for example.
	 */
	return ret >= 0 ? 0 : ret;
}

static ssize_t nvme_cmb_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));

	return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz  : x%08x\n",
		       ndev->cmbloc, ndev->cmbsz);
}
static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);

static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
{
	u64 szu, size, offset;
	resource_size_t bar_size;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	void __iomem *cmb;
	dma_addr_t dma_addr;

	dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
	if (!(NVME_CMB_SZ(dev->cmbsz)))
		return NULL;
	dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);

	if (!use_cmb_sqes)
		return NULL;

	szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
	size = szu * NVME_CMB_SZ(dev->cmbsz);
	offset = szu * NVME_CMB_OFST(dev->cmbloc);
	bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc));

	if (offset > bar_size)
		return NULL;

	/*
	 * Controllers may support a CMB size larger than their BAR,
	 * for example, due to being behind a bridge. Reduce the CMB to
	 * the reported size of the BAR
	 */
	if (size > bar_size - offset)
		size = bar_size - offset;

	dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset;
	cmb = ioremap_wc(dma_addr, size);
	if (!cmb)
		return NULL;

	dev->cmb_dma_addr = dma_addr;
	dev->cmb_size = size;
	return cmb;
}

static inline void nvme_release_cmb(struct nvme_dev *dev)
{
	if (dev->cmb) {
		iounmap(dev->cmb);
		dev->cmb = NULL;
	}
}

static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
{
	return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
}

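/*
 * Discover how many I/O queues the controller will give us, resize the
 * doorbell BAR mapping if needed, allocate interrupt vectors, and create the
 * I/O queues.
 */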
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
	struct nvme_queue *adminq = dev->queues[0];
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	int result, nr_io_queues, size;

	nr_io_queues = num_online_cpus();
	result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
	if (result < 0)
		return result;

	if (nr_io_queues == 0)
		return 0;

	if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
		result = nvme_cmb_qdepth(dev, nr_io_queues,
				sizeof(struct nvme_command));
		if (result > 0)
			dev->q_depth = result;
		else
			nvme_release_cmb(dev);
	}

	size = db_bar_size(dev, nr_io_queues);
	if (size > 8192) {
		iounmap(dev->bar);
		do {
			dev->bar = ioremap(pci_resource_start(pdev, 0), size);
			if (dev->bar)
				break;
			if (!--nr_io_queues)
				return -ENOMEM;
			size = db_bar_size(dev, nr_io_queues);
		} while (1);
		dev->dbs = dev->bar + 4096;
		adminq->q_db = dev->dbs;
	}

	/* Deregister the admin queue's interrupt */
	free_irq(pci_irq_vector(pdev, 0), adminq);
	/*
	 * If we enable msix early due to not intx, disable it again before
	 * setting up the full range we need.
	 */
	pci_free_irq_vectors(pdev);
	nr_io_queues = pci_alloc_irq_vectors(pdev, 1, nr_io_queues,
			PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY);
	if (nr_io_queues <= 0)
		return -EIO;
	dev->max_qid = nr_io_queues;
	/*
	 * Should investigate if there's a performance win from allocating
	 * more queues than interrupt vectors; it might allow the submission
	 * path to scale better, even if the receive path is limited by the
	 * number of interrupts.
	 */

	result = queue_request_irq(adminq);
	if (result) {
		adminq->cq_vector = -1;
		return result;
	}
	return nvme_create_io_queues(dev);
}

static void nvme_del_queue_end(struct request *req, int error)
{
	struct nvme_queue *nvmeq = req->end_io_data;

	blk_mq_free_request(req);
	complete(&nvmeq->dev->ioq_wait);
}

static void nvme_del_cq_end(struct request *req, int error)
{
	struct nvme_queue *nvmeq = req->end_io_data;

	if (!error) {
		unsigned long flags;

		/*
		 * We might be called with the AQ q_lock held
		 * and the I/O queue q_lock should always
		 * nest inside the AQ one.
		 */
		spin_lock_irqsave_nested(&nvmeq->q_lock, flags,
					SINGLE_DEPTH_NESTING);
		nvme_process_cq(nvmeq);
		spin_unlock_irqrestore(&nvmeq->q_lock, flags);
	}
	nvme_del_queue_end(req, error);
}

static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
{
	struct request_queue *q = nvmeq->dev->ctrl.admin_q;
	struct request *req;
	struct nvme_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.delete_queue.opcode = opcode;
	cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);

	req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = ADMIN_TIMEOUT;
	req->end_io_data = nvmeq;

	blk_execute_rq_nowait(q, NULL, req, false,
			opcode == nvme_admin_delete_cq ?
				nvme_del_cq_end : nvme_del_queue_end);
	return 0;
}

static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
{
	int pass;
	unsigned long timeout;
	u8 opcode = nvme_admin_delete_sq;

	for (pass = 0; pass < 2; pass++) {
		int sent = 0, i = queues;

		reinit_completion(&dev->ioq_wait);
 retry:
		timeout = ADMIN_TIMEOUT;
		for (; i > 0; i--, sent++)
			if (nvme_delete_queue(dev->queues[i], opcode))
				break;
		while (sent--) {
			timeout = wait_for_completion_io_timeout(&dev->ioq_wait, timeout);
			if (timeout == 0)
				return;
			if (i)
				goto retry;
		}
		opcode = nvme_admin_delete_cq;
	}
}

/*
 * Return: error value if an error occurred setting up the queues or calling
 * Identify Device.  0 if these succeeded, even if adding some of the
 * namespaces failed.  At the moment, these failures are silent.  TBD which
 * failures should be reported.
 */
static int nvme_dev_add(struct nvme_dev *dev)
{
	if (!dev->ctrl.tagset) {
		dev->tagset.ops = &nvme_mq_ops;
		dev->tagset.nr_hw_queues = dev->online_queues - 1;
		dev->tagset.timeout = NVME_IO_TIMEOUT;
		dev->tagset.numa_node = dev_to_node(dev->dev);
		dev->tagset.queue_depth =
				min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
		dev->tagset.cmd_size = nvme_cmd_size(dev);
		dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
		dev->tagset.driver_data = dev;
		if (blk_mq_alloc_tag_set(&dev->tagset))
			return 0;
		dev->ctrl.tagset = &dev->tagset;
	} else {
		blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);

		/* Free previously allocated queues that are no longer usable */
		nvme_free_queues(dev, dev->online_queues);
	}

	return 0;
}

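/*
 * Bring the PCI device up far enough to talk to the controller: enable the
 * device, set the DMA mask, grab an initial interrupt vector, read CAP, and
 * map the controller memory buffer if one is present.
 */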
static int nvme_pci_enable(struct nvme_dev *dev)
{
	u64 cap;
	int result = -ENOMEM;
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pci_enable_device_mem(pdev))
		return result;

	pci_set_master(pdev);

	if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32)))
		goto disable;

	if (readl(dev->bar + NVME_REG_CSTS) == -1) {
		result = -ENODEV;
		goto disable;
	}

	/*
	 * Some devices and/or platforms don't advertise or work with INTx
	 * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
	 * adjust this later.
	 */
	result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (result < 0)
		return result;

	cap = lo_hi_readq(dev->bar + NVME_REG_CAP);

	dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
	dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
	dev->dbs = dev->bar + 4096;

	/*
	 * Temporary fix for the Apple controller found in the MacBook8,1 and
	 * some MacBook7,1 to avoid controller resets and data loss.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
		dev->q_depth = 2;
		dev_warn(dev->dev, "detected Apple NVMe controller, set "
			"queue depth=%u to work around controller resets\n",
			dev->q_depth);
	}

	/*
	 * CMBs can currently only exist on >=1.2 PCIe devices. We only
	 * populate sysfs if a CMB is implemented. Note that we add the
	 * CMB attribute to the nvme_ctrl kobj which removes the need to remove
	 * it on exit. Since nvme_dev_attrs_group has no name we can pass
	 * NULL as final argument to sysfs_add_file_to_group.
	 */

	if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
		dev->cmb = nvme_map_cmb(dev);

		if (dev->cmbsz) {
			if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
						    &dev_attr_cmb.attr, NULL))
				dev_warn(dev->dev,
					 "failed to add sysfs attribute for CMB\n");
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_save_state(pdev);
	return 0;

 disable:
	pci_disable_device(pdev);
	return result;
}

static void nvme_dev_unmap(struct nvme_dev *dev)
{
	if (dev->bar)
		iounmap(dev->bar);
	pci_release_mem_regions(to_pci_dev(dev->dev));
}

static void nvme_pci_disable(struct nvme_dev *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	pci_free_irq_vectors(pdev);

	if (pci_is_enabled(pdev)) {
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
	}
}

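/*
 * Shut the controller down under shutdown_lock: stop the watchdog, quiesce
 * the block queues, suspend or disable the hardware queues depending on the
 * controller state, tear down the PCI resources and cancel all outstanding
 * requests.  With shutdown set, the admin queue is disabled via an orderly
 * NVMe shutdown rather than a plain controller disable.
 */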
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
{
	int i, queues;
	u32 csts = -1;

	del_timer_sync(&dev->watchdog_timer);

	mutex_lock(&dev->shutdown_lock);
	if (pci_is_enabled(to_pci_dev(dev->dev))) {
		nvme_stop_queues(&dev->ctrl);
		csts = readl(dev->bar + NVME_REG_CSTS);
	}

	queues = dev->online_queues - 1;
	for (i = dev->queue_count - 1; i > 0; i--)
		nvme_suspend_queue(dev->queues[i]);

	if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
		/* A device might become IO incapable very soon during
		 * probe, before the admin queue is configured. Thus,
		 * queue_count can be 0 here.
		 */
		if (dev->queue_count)
			nvme_suspend_queue(dev->queues[0]);
	} else {
		nvme_disable_io_queues(dev, queues);
		nvme_disable_admin_queue(dev, shutdown);
	}
	nvme_pci_disable(dev);

	blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
	blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);
	mutex_unlock(&dev->shutdown_lock);
}

static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
	dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
						PAGE_SIZE, PAGE_SIZE, 0);
	if (!dev->prp_page_pool)
		return -ENOMEM;

	/* Optimisation for I/Os between 4k and 128k */
	dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
						256, 256, 0);
	if (!dev->prp_small_pool) {
		dma_pool_destroy(dev->prp_page_pool);
		return -ENOMEM;
	}
	return 0;
}

static void nvme_release_prp_pools(struct nvme_dev *dev)
{
	dma_pool_destroy(dev->prp_page_pool);
	dma_pool_destroy(dev->prp_small_pool);
}

static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
{
	struct nvme_dev *dev = to_nvme_dev(ctrl);

	put_device(dev->dev);
	if (dev->tagset.tags)
		blk_mq_free_tag_set(&dev->tagset);
	if (dev->ctrl.admin_q)
		blk_put_queue(dev->ctrl.admin_q);
	kfree(dev->queues);
	kfree(dev->ctrl.opal_dev);
	kfree(dev);
}

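/*
 * Called when bringing up the controller from the reset path fails: warn,
 * disable the device and defer final teardown to remove_work.  The extra
 * reference taken here is dropped by the work item, or immediately if the
 * work was already queued.
 */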
static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
{
	dev_warn(dev->ctrl.device, "Removing after probe failure status: %d\n", status);

	kref_get(&dev->ctrl.kref);
	nvme_dev_disable(dev, false);
	if (!schedule_work(&dev->remove_work))
		nvme_put_ctrl(&dev->ctrl);
}

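/*
 * Main (re)initialization path, run from reset_work: shut down a live
 * controller, re-enable the PCI device, set up the admin queue and tags,
 * identify the controller, handle OPAL unlock after resume, create the I/O
 * queues and finally mark the controller live.  Any failure is funnelled
 * into nvme_remove_dead_ctrl().
 */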
static void nvme_reset_work(struct work_struct *work)
{
	struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
	bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
	int result = -ENODEV;

	if (WARN_ON(dev->ctrl.state == NVME_CTRL_RESETTING))
		goto out;

	/*
	 * If we're called to reset a live controller first shut it down before
	 * moving on.
	 */
	if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
		nvme_dev_disable(dev, false);

	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
		goto out;

	result = nvme_pci_enable(dev);
	if (result)
		goto out;

	result = nvme_configure_admin_queue(dev);
	if (result)
		goto out;

	nvme_init_queue(dev->queues[0], 0);
	result = nvme_alloc_admin_tags(dev);
	if (result)
		goto out;

	result = nvme_init_identify(&dev->ctrl);
	if (result)
		goto out;

	if ((dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) && !dev->ctrl.opal_dev) {
		dev->ctrl.opal_dev =
			init_opal_dev(&dev->ctrl, &nvme_sec_submit);
	}

	if (was_suspend)
		opal_unlock_from_suspend(dev->ctrl.opal_dev);

	result = nvme_setup_io_queues(dev);
	if (result)
		goto out;

	/*
	 * A controller that can not execute IO typically requires user
	 * intervention to correct. For such degraded controllers, the driver
	 * should not submit commands the user did not request, so skip
	 * registering for asynchronous event notification on this condition.
	 */
	if (dev->online_queues > 1)
		nvme_queue_async_events(&dev->ctrl);

	mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));

	/*
	 * Keep the controller around but remove all namespaces if we don't have
	 * any working I/O queue.
	 */
	if (dev->online_queues < 2) {
		dev_warn(dev->ctrl.device, "IO queues not created\n");
		nvme_kill_queues(&dev->ctrl);
		nvme_remove_namespaces(&dev->ctrl);
	} else {
		nvme_start_queues(&dev->ctrl);
		nvme_dev_add(dev);
	}

	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
		dev_warn(dev->ctrl.device, "failed to mark controller live\n");
		goto out;
	}

	if (dev->online_queues > 1)
		nvme_queue_scan(&dev->ctrl);
	return;

 out:
	nvme_remove_dead_ctrl(dev, result);
}

static void nvme_remove_dead_ctrl_work(struct work_struct *work)
{
	struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	nvme_kill_queues(&dev->ctrl);
	if (pci_get_drvdata(pdev))
		device_release_driver(&pdev->dev);
	nvme_put_ctrl(&dev->ctrl);
}

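/*
 * Schedule reset_work, but only if the admin queue is still usable and no
 * reset is already in flight.
 */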
static int nvme_reset(struct nvme_dev *dev)
{
	if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
		return -ENODEV;
	if (work_busy(&dev->reset_work))
		return -ENODEV;
	if (!queue_work(nvme_workq, &dev->reset_work))
		return -EBUSY;
	return 0;
}

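/* Register accessors exported to the core driver through nvme_ctrl_ops. */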
static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
	*val = readl(to_nvme_dev(ctrl)->bar + off);
	return 0;
}

static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{
	writel(val, to_nvme_dev(ctrl)->bar + off);
	return 0;
}

static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
	*val = readq(to_nvme_dev(ctrl)->bar + off);
	return 0;
}

static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
{
	struct nvme_dev *dev = to_nvme_dev(ctrl);
	int ret = nvme_reset(dev);

	if (!ret)
		flush_work(&dev->reset_work);
	return ret;
}

static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
	.name			= "pcie",
	.module			= THIS_MODULE,
	.reg_read32		= nvme_pci_reg_read32,
	.reg_write32		= nvme_pci_reg_write32,
	.reg_read64		= nvme_pci_reg_read64,
	.reset_ctrl		= nvme_pci_reset_ctrl,
	.free_ctrl		= nvme_pci_free_ctrl,
	.submit_async_event	= nvme_pci_submit_async_event,
};

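/*
 * Claim the PCI memory regions and map the first 8K of BAR 0, which covers
 * the controller registers and the admin queue doorbells.
 */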
static int nvme_dev_map(struct nvme_dev *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pci_request_mem_regions(pdev, "nvme"))
		return -ENODEV;

	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
	if (!dev->bar)
		goto release;

	return 0;
  release:
	pci_release_mem_regions(pdev);
	return -ENODEV;
}

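/*
 * Probe: allocate the per-device structure on the local NUMA node, map the
 * BAR, initialize work items, watchdog timer, locks and PRP pools, register
 * the controller with the core and kick off the initial reset so the rest
 * of the bring-up happens asynchronously.
 */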
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int node, result = -ENOMEM;
	struct nvme_dev *dev;

	node = dev_to_node(&pdev->dev);
	if (node == NUMA_NO_NODE)
		set_dev_node(&pdev->dev, first_memory_node);

	dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
	if (!dev)
		return -ENOMEM;
	dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *),
							GFP_KERNEL, node);
	if (!dev->queues)
		goto free;

	dev->dev = get_device(&pdev->dev);
	pci_set_drvdata(pdev, dev);

	result = nvme_dev_map(dev);
	if (result)
		goto free;

	INIT_WORK(&dev->reset_work, nvme_reset_work);
	INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
	setup_timer(&dev->watchdog_timer, nvme_watchdog_timer,
		(unsigned long)dev);
	mutex_init(&dev->shutdown_lock);
	init_completion(&dev->ioq_wait);

	result = nvme_setup_prp_pools(dev);
	if (result)
		goto put_pci;

	result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
			id->driver_data);
	if (result)
		goto release_pools;

	dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));

	queue_work(nvme_workq, &dev->reset_work);
	return 0;

 release_pools:
	nvme_release_prp_pools(dev);
 put_pci:
	put_device(dev->dev);
	nvme_dev_unmap(dev);
 free:
	kfree(dev->queues);
	kfree(dev);
	return result;
}

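/*
 * Reset notification from the PCI core: quiesce the controller before the
 * device is reset and schedule a full controller reset once it is done.
 */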
static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	if (prepare)
		nvme_dev_disable(dev, false);
	else
		nvme_reset(dev);
}

static void nvme_shutdown(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	nvme_dev_disable(dev, true);
}

/*
 * The driver's remove may be called on a device in a partially initialized
 * state. This function must not have any dependencies on the device state in
 * order to proceed.
 */
static void nvme_remove(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);

	pci_set_drvdata(pdev, NULL);

	if (!pci_device_is_present(pdev)) {
		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
		nvme_dev_disable(dev, false);
	}

	flush_work(&dev->reset_work);
	nvme_uninit_ctrl(&dev->ctrl);
	nvme_dev_disable(dev, true);
	nvme_dev_remove_admin(dev);
	nvme_free_queues(dev, 0);
	nvme_release_cmb(dev);
	nvme_release_prp_pools(dev);
	nvme_dev_unmap(dev);
	nvme_put_ctrl(&dev->ctrl);
}

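/*
 * sriov_numvfs handler: enable the requested number of virtual functions,
 * or disable SR-IOV when numvfs is zero and no VFs are currently assigned.
 */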
static int nvme_pci_sriov_configure(struct pci_dev *pdev, int numvfs)
{
	int ret = 0;

	if (numvfs == 0) {
		if (pci_vfs_assigned(pdev)) {
			dev_warn(&pdev->dev,
				"Cannot disable SR-IOV VFs while assigned\n");
			return -EPERM;
		}
		pci_disable_sriov(pdev);
		return 0;
	}

	ret = pci_enable_sriov(pdev, numvfs);
	return ret ? ret : numvfs;
}

#ifdef CONFIG_PM_SLEEP
static int nvme_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct nvme_dev *ndev = pci_get_drvdata(pdev);

	nvme_dev_disable(ndev, true);
	return 0;
}

static int nvme_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct nvme_dev *ndev = pci_get_drvdata(pdev);

	nvme_reset(ndev);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);

static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	/*
	 * A frozen channel requires a reset. When detected, this method will
	 * shutdown the controller to quiesce. The controller will be restarted
	 * after the slot reset through driver's slot_reset callback.
	 */
	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		dev_warn(dev->ctrl.device,
			"frozen state error detected, reset controller\n");
		nvme_dev_disable(dev, false);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_warn(dev->ctrl.device,
			"failure state error detected, request disconnect\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	dev_info(dev->ctrl.device, "restart after slot reset\n");
	pci_restore_state(pdev);
	nvme_reset(dev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void nvme_error_resume(struct pci_dev *pdev)
{
	pci_cleanup_aer_uncorrect_error_status(pdev);
}

static const struct pci_error_handlers nvme_err_handler = {
	.error_detected	= nvme_error_detected,
	.slot_reset	= nvme_slot_reset,
	.resume		= nvme_error_resume,
	.reset_notify	= nvme_reset_notify,
};

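/* PCI IDs this driver binds to, with per-device quirks. */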
static const struct pci_device_id nvme_id_table[] = {
	{ PCI_VDEVICE(INTEL, 0x0953),
		.driver_data = NVME_QUIRK_STRIPE_SIZE |
				NVME_QUIRK_DISCARD_ZEROES, },
	{ PCI_VDEVICE(INTEL, 0x0a53),
		.driver_data = NVME_QUIRK_STRIPE_SIZE |
				NVME_QUIRK_DISCARD_ZEROES, },
	{ PCI_VDEVICE(INTEL, 0x0a54),
		.driver_data = NVME_QUIRK_STRIPE_SIZE |
				NVME_QUIRK_DISCARD_ZEROES, },
	{ PCI_VDEVICE(INTEL, 0x5845),	/* Qemu emulated controller */
		.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
	{ PCI_DEVICE(0x1c58, 0x0003),	/* HGST adapter */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x1c5f, 0x0540),	/* Memblaze Pblaze4 adapter */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);

static struct pci_driver nvme_driver = {
	.name		= "nvme",
	.id_table	= nvme_id_table,
	.probe		= nvme_probe,
	.remove		= nvme_remove,
	.shutdown	= nvme_shutdown,
	.driver		= {
		.pm	= &nvme_dev_pm_ops,
	},
	.sriov_configure = nvme_pci_sriov_configure,
	.err_handler	= &nvme_err_handler,
};

static int __init nvme_init(void)
{
	int result;

	nvme_workq = alloc_workqueue("nvme", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
	if (!nvme_workq)
		return -ENOMEM;

	result = pci_register_driver(&nvme_driver);
	if (result)
		destroy_workqueue(nvme_workq);
	return result;
}

static void __exit nvme_exit(void)
{
	pci_unregister_driver(&nvme_driver);
	destroy_workqueue(nvme_workq);
	_nvme_check_size();
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_init);
module_exit(nvme_exit);