/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/t10-pi.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <asm/unaligned.h>

#include "nvme.h"

#define NVME_Q_DEPTH		1024
#define NVME_AQ_DEPTH		256
#define SQ_SIZE(depth)		((depth) * sizeof(struct nvme_command))
#define CQ_SIZE(depth)		((depth) * sizeof(struct nvme_completion))

/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_NR_AEN_COMMANDS	1
#define NVME_AQ_BLKMQ_DEPTH	(NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)
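/*
 * Admin command ids below NVME_AQ_BLKMQ_DEPTH belong to blk-mq tags; ids at
 * or above it are reserved for the driver's own AEN commands, which
 * nvme_process_cq() completes without a struct request.
 */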

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static bool use_cmb_sqes = true;
module_param(use_cmb_sqes, bool, 0644);
MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");

static struct workqueue_struct *nvme_workq;

struct nvme_dev;
struct nvme_queue;

static int nvme_reset(struct nvme_dev *dev);
static void nvme_process_cq(struct nvme_queue *nvmeq);
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct nvme_queue **queues;
	struct blk_mq_tag_set tagset;
	struct blk_mq_tag_set admin_tagset;
	u32 __iomem *dbs;
	struct device *dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	unsigned queue_count;
	unsigned online_queues;
	unsigned max_qid;
	int q_depth;
	u32 db_stride;
	struct msix_entry *entry;
	void __iomem *bar;
	struct work_struct reset_work;
	struct work_struct remove_work;
	struct work_struct async_work;
	struct timer_list watchdog_timer;
	struct mutex shutdown_lock;
	bool subsystem;
	void __iomem *cmb;
	dma_addr_t cmb_dma_addr;
	u64 cmb_size;
	u32 cmbsz;
	struct nvme_ctrl ctrl;
	struct completion ioq_wait;
};

static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_dev, ctrl);
}

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct device *q_dmadev;
	struct nvme_dev *dev;
	char irqname[24];	/* nvme4294967295-65535\0 */
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	struct nvme_command __iomem *sq_cmds_io;
	volatile struct nvme_completion *cqes;
	struct blk_mq_tags **tags;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	u32 __iomem *q_db;
	u16 q_depth;
	s16 cq_vector;
	u16 sq_tail;
	u16 cq_head;
	u16 qid;
	u8 cq_phase;
	u8 cqe_seen;
};

/*
 * The nvme_iod describes the data in an I/O, including the list of PRP
 * entries.  You can't see it in this data structure because C doesn't let
 * me express that.  Use nvme_init_iod to ensure there's enough space
 * allocated to store the PRP list.
 */
struct nvme_iod {
	struct nvme_queue *nvmeq;
	int aborted;
	int npages;		/* In the PRP list. 0 means small pool in use */
	int nents;		/* Used in scatterlist */
	int length;		/* Of data, in bytes */
	dma_addr_t first_dma;
	struct scatterlist meta_sg; /* metadata requires single contiguous buffer */
	struct scatterlist *sg;
	struct scatterlist inline_sg[0];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
}

/*
 * Max size of iod being embedded in the request payload
 */
#define NVME_INT_PAGES		2
#define NVME_INT_BYTES(dev)	(NVME_INT_PAGES * (dev)->ctrl.page_size)

/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
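/*
 * Example, assuming a 4k controller page size: a 64k transfer can need up
 * to 17 PRP entries (one extra page for a misaligned start), i.e. 136
 * bytes of PRP list, which comfortably fits in one page.
 */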
static int nvme_npages(unsigned size, struct nvme_dev *dev)
{
	unsigned nprps = DIV_ROUND_UP(size + dev->ctrl.page_size,
				      dev->ctrl.page_size);
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}

static unsigned int nvme_iod_alloc_size(struct nvme_dev *dev,
		unsigned int size, unsigned int nseg)
{
	return sizeof(__le64 *) * nvme_npages(size, dev) +
			sizeof(struct scatterlist) * nseg;
}

static unsigned int nvme_cmd_size(struct nvme_dev *dev)
{
	return sizeof(struct nvme_iod) +
		nvme_iod_alloc_size(dev, NVME_INT_BYTES(dev), NVME_INT_PAGES);
}

static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = dev->queues[0];

	WARN_ON(hctx_idx != 0);
	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
	WARN_ON(nvmeq->tags);

	hctx->driver_data = nvmeq;
	nvmeq->tags = &dev->admin_tagset.tags[0];
	return 0;
}

static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	nvmeq->tags = NULL;
}

static int nvme_admin_init_request(void *data, struct request *req,
				unsigned int hctx_idx, unsigned int rq_idx,
				unsigned int numa_node)
{
	struct nvme_dev *dev = data;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = dev->queues[0];

	BUG_ON(!nvmeq);
	iod->nvmeq = nvmeq;
	return 0;
}

static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];

	if (!nvmeq->tags)
		nvmeq->tags = &dev->tagset.tags[hctx_idx];

	WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
	hctx->driver_data = nvmeq;
	return 0;
}

static int nvme_init_request(void *data, struct request *req,
				unsigned int hctx_idx, unsigned int rq_idx,
				unsigned int numa_node)
{
	struct nvme_dev *dev = data;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];

	BUG_ON(!nvmeq);
	iod->nvmeq = nvmeq;
	return 0;
}

static void nvme_complete_async_event(struct nvme_dev *dev,
		struct nvme_completion *cqe)
{
	u16 status = le16_to_cpu(cqe->status) >> 1;
	u32 result = le32_to_cpu(cqe->result);

	if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
		++dev->ctrl.event_limit;
		queue_work(nvme_workq, &dev->async_work);
	}

	if (status != NVME_SC_SUCCESS)
		return;

	switch (result & 0xff07) {
	case NVME_AER_NOTICE_NS_CHANGED:
		dev_info(dev->ctrl.device, "rescanning\n");
		nvme_queue_scan(&dev->ctrl);
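		/* fall through: log the notice below as well */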
	default:
		dev_warn(dev->ctrl.device, "async event result %08x\n", result);
	}
}

/**
 * __nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
						struct nvme_command *cmd)
{
	u16 tail = nvmeq->sq_tail;

	if (nvmeq->sq_cmds_io)
		memcpy_toio(&nvmeq->sq_cmds_io[tail], cmd, sizeof(*cmd));
	else
		memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));

	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
}

static __le64 **iod_list(struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	return (__le64 **)(iod->sg + req->nr_phys_segments);
}

static int nvme_init_iod(struct request *rq, unsigned size,
		struct nvme_dev *dev)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
	int nseg = rq->nr_phys_segments;

	if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
		iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
		if (!iod->sg)
			return BLK_MQ_RQ_QUEUE_BUSY;
	} else {
		iod->sg = iod->inline_sg;
	}

	iod->aborted = 0;
	iod->npages = -1;
	iod->nents = 0;
	iod->length = size;
	return 0;
}

static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	const int last_prp = dev->ctrl.page_size / 8 - 1;
	int i;
	__le64 **list = iod_list(req);
	dma_addr_t prp_dma = iod->first_dma;

	if (req->cmd_flags & REQ_DISCARD)
		kfree(req->completion_data);

	if (iod->npages == 0)
		dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
	for (i = 0; i < iod->npages; i++) {
		__le64 *prp_list = list[i];
		dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
		dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
		prp_dma = next_prp_dma;
	}

	if (iod->sg != iod->inline_sg)
		kfree(iod->sg);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
{
	if (be32_to_cpu(pi->ref_tag) == v)
		pi->ref_tag = cpu_to_be32(p);
}

static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
{
	if (be32_to_cpu(pi->ref_tag) == p)
		pi->ref_tag = cpu_to_be32(v);
}

/**
 * nvme_dif_remap - remaps ref tags to bip seed and physical lba
 *
 * The virtual start sector is the one that was originally submitted by the
 * block layer.	Due to partitioning, MD/DM cloning, etc. the actual physical
 * start sector may be different. Remap protection information to match the
 * physical LBA on writes, and back to the original seed on reads.
 *
 * Type 0 and 3 do not have a ref tag, so no remapping required.
 */
static void nvme_dif_remap(struct request *req,
			void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
{
	struct nvme_ns *ns = req->rq_disk->private_data;
	struct bio_integrity_payload *bip;
	struct t10_pi_tuple *pi;
	void *p, *pmap;
	u32 i, nlb, ts, phys, virt;

	if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3)
		return;

	bip = bio_integrity(req->bio);
	if (!bip)
		return;

	pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;

	p = pmap;
	virt = bip_get_seed(bip);
	phys = nvme_block_nr(ns, blk_rq_pos(req));
	nlb = (blk_rq_bytes(req) >> ns->lba_shift);
	ts = ns->disk->queue->integrity.tuple_size;

	for (i = 0; i < nlb; i++, virt++, phys++) {
		pi = (struct t10_pi_tuple *)p;
		dif_swap(phys, virt, pi);
		p += ts;
	}
	kunmap_atomic(pmap);
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
static void nvme_dif_remap(struct request *req,
			void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
{
}
static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
{
}
static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
{
}
#endif

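/*
 * PRP1 always covers the first (possibly unaligned) controller page of the
 * transfer.  If the remainder fits in one more page, PRP2 points at it
 * directly; otherwise PRP2 points to a list of page-sized PRP entries, and
 * the last slot of each full list page chains to the next one.
 */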
static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req,
		int total_len)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	int length = total_len;
	struct scatterlist *sg = iod->sg;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	u32 page_size = dev->ctrl.page_size;
	int offset = dma_addr & (page_size - 1);
	__le64 *prp_list;
	__le64 **list = iod_list(req);
	dma_addr_t prp_dma;
	int nprps, i;

	length -= (page_size - offset);
	if (length <= 0)
		return true;

	dma_len -= (page_size - offset);
	if (dma_len) {
		dma_addr += (page_size - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= page_size) {
		iod->first_dma = dma_addr;
		return true;
	}

	nprps = DIV_ROUND_UP(length, page_size);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
	if (!prp_list) {
		iod->first_dma = dma_addr;
		iod->npages = -1;
		return false;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;
	i = 0;
	for (;;) {
		if (i == page_size >> 3) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
			if (!prp_list)
				return false;
			list[iod->npages++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= page_size;
		dma_addr += page_size;
		length -= page_size;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		BUG_ON(dma_len < 0);
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	return true;
}

static int nvme_map_data(struct nvme_dev *dev, struct request *req,
		unsigned size, struct nvme_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct request_queue *q = req->q;
	enum dma_data_direction dma_dir = rq_data_dir(req) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;
	int ret = BLK_MQ_RQ_QUEUE_ERROR;

	sg_init_table(iod->sg, req->nr_phys_segments);
	iod->nents = blk_rq_map_sg(q, req, iod->sg);
	if (!iod->nents)
		goto out;

	ret = BLK_MQ_RQ_QUEUE_BUSY;
	if (!dma_map_sg(dev->dev, iod->sg, iod->nents, dma_dir))
		goto out;

	if (!nvme_setup_prps(dev, req, size))
		goto out_unmap;

	ret = BLK_MQ_RQ_QUEUE_ERROR;
	if (blk_integrity_rq(req)) {
		if (blk_rq_count_integrity_sg(q, req->bio) != 1)
			goto out_unmap;

		sg_init_table(&iod->meta_sg, 1);
		if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1)
			goto out_unmap;

		if (rq_data_dir(req))
			nvme_dif_remap(req, nvme_dif_prep);

		if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
			goto out_unmap;
	}

	cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
	cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
	if (blk_integrity_rq(req))
		cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
	return BLK_MQ_RQ_QUEUE_OK;

out_unmap:
	dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
out:
	return ret;
}

static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	enum dma_data_direction dma_dir = rq_data_dir(req) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;

	if (iod->nents) {
		dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
		if (blk_integrity_rq(req)) {
			if (!rq_data_dir(req))
				nvme_dif_remap(req, nvme_dif_complete);
			dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
		}
	}

	nvme_free_iod(dev, req);
}

/*
 * NOTE: ns is NULL when called on the admin queue.
 */
static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_queue *nvmeq = hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *req = bd->rq;
	struct nvme_command cmnd;
	unsigned map_len;
	int ret = BLK_MQ_RQ_QUEUE_OK;

	/*
	 * If formatted with metadata, require the block layer to provide a
	 * buffer unless this namespace is formatted such that the metadata
	 * can be stripped/generated by the controller with PRACT=1.
	 */
	if (ns && ns->ms && !blk_integrity_rq(req)) {
		if (!(ns->pi_type && ns->ms == 8) &&
					req->cmd_type != REQ_TYPE_DRV_PRIV) {
			blk_mq_end_request(req, -EFAULT);
			return BLK_MQ_RQ_QUEUE_OK;
		}
	}

	map_len = nvme_map_len(req);
	ret = nvme_init_iod(req, map_len, dev);
	if (ret)
		return ret;

	ret = nvme_setup_cmd(ns, req, &cmnd);
	if (ret)
		goto out;

	if (req->nr_phys_segments)
		ret = nvme_map_data(dev, req, map_len, &cmnd);

	if (ret)
		goto out;

	cmnd.common.command_id = req->tag;
	blk_mq_start_request(req);

	spin_lock_irq(&nvmeq->q_lock);
	if (unlikely(nvmeq->cq_vector < 0)) {
		if (ns && !test_bit(NVME_NS_DEAD, &ns->flags))
			ret = BLK_MQ_RQ_QUEUE_BUSY;
		else
			ret = BLK_MQ_RQ_QUEUE_ERROR;
		spin_unlock_irq(&nvmeq->q_lock);
		goto out;
	}
	__nvme_submit_cmd(nvmeq, &cmnd);
	nvme_process_cq(nvmeq);
	spin_unlock_irq(&nvmeq->q_lock);
	return BLK_MQ_RQ_QUEUE_OK;
out:
	nvme_free_iod(dev, req);
	return ret;
}

static void nvme_complete_rq(struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_dev *dev = iod->nvmeq->dev;
	int error = 0;

	nvme_unmap_data(dev, req);

	if (unlikely(req->errors)) {
		if (nvme_req_needs_retry(req, req->errors)) {
			nvme_requeue_req(req);
			return;
		}

		if (req->cmd_type == REQ_TYPE_DRV_PRIV)
			error = req->errors;
		else
			error = nvme_error_status(req->errors);
	}

	if (unlikely(iod->aborted)) {
		dev_warn(dev->ctrl.device,
			"completing aborted command with status: %04x\n",
			req->errors);
	}

	blk_mq_end_request(req, error);
}

/* We read the CQE phase first to check if the rest of the entry is valid */
static inline bool nvme_cqe_valid(struct nvme_queue *nvmeq, u16 head,
		u16 phase)
{
	return (le16_to_cpu(nvmeq->cqes[head].status) & 1) == phase;
}

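/*
 * The phase tag flips each time the completion queue wraps, so an entry
 * left over from the previous pass carries the opposite phase bit and
 * terminates the scan.
 */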
static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
{
	u16 head, phase;

	head = nvmeq->cq_head;
	phase = nvmeq->cq_phase;

	while (nvme_cqe_valid(nvmeq, head, phase)) {
		struct nvme_completion cqe = nvmeq->cqes[head];
		struct request *req;

		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}

		if (tag && *tag == cqe.command_id)
			*tag = -1;

		if (unlikely(cqe.command_id >= nvmeq->q_depth)) {
			dev_warn(nvmeq->dev->ctrl.device,
				"invalid id %d completed on queue %d\n",
				cqe.command_id, le16_to_cpu(cqe.sq_id));
			continue;
		}

		/*
		 * AEN requests are special as they don't time out and can
		 * survive any kind of queue freeze and often don't respond to
		 * aborts.  We don't even bother to allocate a struct request
		 * for them but rather special case them here.
		 */
		if (unlikely(nvmeq->qid == 0 &&
				cqe.command_id >= NVME_AQ_BLKMQ_DEPTH)) {
			nvme_complete_async_event(nvmeq->dev, &cqe);
			continue;
		}

		req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
		if (req->cmd_type == REQ_TYPE_DRV_PRIV && req->special)
			memcpy(req->special, &cqe, sizeof(cqe));
		blk_mq_complete_request(req, le16_to_cpu(cqe.status) >> 1);

	}

	/* If the controller ignores the cq head doorbell and continuously
	 * writes to the queue, it is theoretically possible to wrap around
	 * the queue twice and mistakenly return IRQ_NONE.  Linux only
	 * requires that 0.1% of your interrupts are handled, so this isn't
	 * a big problem.
	 */
	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
		return;

	if (likely(nvmeq->cq_vector >= 0))
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	nvmeq->cqe_seen = 1;
}

static void nvme_process_cq(struct nvme_queue *nvmeq)
{
	__nvme_process_cq(nvmeq, NULL);
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	irqreturn_t result;
	struct nvme_queue *nvmeq = data;
	spin_lock(&nvmeq->q_lock);
	nvme_process_cq(nvmeq);
	result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE;
	nvmeq->cqe_seen = 0;
	spin_unlock(&nvmeq->q_lock);
	return result;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase))
		return IRQ_WAKE_THREAD;
	return IRQ_NONE;
}

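/*
 * Used by the block layer for polled I/O: reap completions without waiting
 * for an interrupt and report whether the tag being polled for completed.
 */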
static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) {
		spin_lock_irq(&nvmeq->q_lock);
		__nvme_process_cq(nvmeq, &tag);
		spin_unlock_irq(&nvmeq->q_lock);

		if (tag == -1)
			return 1;
	}

	return 0;
}

static void nvme_async_event_work(struct work_struct *work)
{
	struct nvme_dev *dev = container_of(work, struct nvme_dev, async_work);
	struct nvme_queue *nvmeq = dev->queues[0];
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_async_event;

	spin_lock_irq(&nvmeq->q_lock);
	while (dev->ctrl.event_limit > 0) {
		c.common.command_id = NVME_AQ_BLKMQ_DEPTH +
			--dev->ctrl.event_limit;
		__nvme_submit_cmd(nvmeq, &c);
	}
	spin_unlock_irq(&nvmeq->q_lock);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	/*
	 * Note: we (ab)use the fact that the prp fields survive if no data
	 * is attached to the request.
	 */
	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	/*
	 * Note: we (ab)use the fact that the prp fields survive if no data
	 * is attached to the request.
	 */
	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static void abort_endio(struct request *req, int error)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = iod->nvmeq;
	u16 status = req->errors;

	dev_warn(nvmeq->dev->ctrl.device, "Abort status: 0x%x", status);
	atomic_inc(&nvmeq->dev->ctrl.abort_limit);
	blk_mq_free_request(req);
}

static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = iod->nvmeq;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *abort_req;
	struct nvme_command cmd;

	/*
	 * Shutdown immediately if controller times out while starting. The
	 * reset work will see the pci device disabled when it gets the forced
	 * cancellation error. All outstanding requests are completed on
	 * shutdown, so we return BLK_EH_HANDLED.
	 */
	if (dev->ctrl.state == NVME_CTRL_RESETTING) {
		dev_warn(dev->ctrl.device,
			 "I/O %d QID %d timeout, disable controller\n",
			 req->tag, nvmeq->qid);
		nvme_dev_disable(dev, false);
		req->errors = NVME_SC_CANCELLED;
		return BLK_EH_HANDLED;
	}

	/*
	 * Shutdown the controller immediately and schedule a reset if the
	 * command was already aborted once before and still hasn't been
	 * returned to the driver, or if this is the admin queue.
	 */
	if (!nvmeq->qid || iod->aborted) {
		dev_warn(dev->ctrl.device,
			 "I/O %d QID %d timeout, reset controller\n",
			 req->tag, nvmeq->qid);
		nvme_dev_disable(dev, false);
		queue_work(nvme_workq, &dev->reset_work);

		/*
		 * Mark the request as handled, since the inline shutdown
		 * forces all outstanding requests to complete.
		 */
		req->errors = NVME_SC_CANCELLED;
		return BLK_EH_HANDLED;
	}

	iod->aborted = 1;

	if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
		atomic_inc(&dev->ctrl.abort_limit);
		return BLK_EH_RESET_TIMER;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.abort.opcode = nvme_admin_abort_cmd;
	cmd.abort.cid = req->tag;
	cmd.abort.sqid = cpu_to_le16(nvmeq->qid);

	dev_warn(nvmeq->dev->ctrl.device,
		"I/O %d QID %d timeout, aborting\n",
		 req->tag, nvmeq->qid);

	abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
			BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(abort_req)) {
		atomic_inc(&dev->ctrl.abort_limit);
		return BLK_EH_RESET_TIMER;
	}

	abort_req->timeout = ADMIN_TIMEOUT;
	abort_req->end_io_data = NULL;
	blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);

	/*
	 * The aborted req will be completed on receiving the abort req.
	 * We enable the timer again. If hit twice, it'll cause a device reset,
	 * as the device then is in a faulty state.
	 */
	return BLK_EH_RESET_TIMER;
}

static void nvme_cancel_io(struct request *req, void *data, bool reserved)
{
	int status;

	if (!blk_mq_request_started(req))
		return;

	dev_dbg_ratelimited(((struct nvme_dev *) data)->ctrl.device,
				"Cancelling I/O %d", req->tag);

	status = NVME_SC_ABORT_REQ;
	if (blk_queue_dying(req->q))
		status |= NVME_SC_DNR;
	blk_mq_complete_request(req, status);
}

static void nvme_free_queue(struct nvme_queue *nvmeq)
{
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	if (nvmeq->sq_cmds)
		dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
}

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
	int i;

	for (i = dev->queue_count - 1; i >= lowest; i--) {
		struct nvme_queue *nvmeq = dev->queues[i];
		dev->queue_count--;
		dev->queues[i] = NULL;
		nvme_free_queue(nvmeq);
	}
}

/**
 * nvme_suspend_queue - put queue into suspended state
 * @nvmeq: queue to suspend
 */
static int nvme_suspend_queue(struct nvme_queue *nvmeq)
{
	int vector;

	spin_lock_irq(&nvmeq->q_lock);
	if (nvmeq->cq_vector == -1) {
		spin_unlock_irq(&nvmeq->q_lock);
		return 1;
	}
	vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
	nvmeq->dev->online_queues--;
	nvmeq->cq_vector = -1;
	spin_unlock_irq(&nvmeq->q_lock);

	if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
		blk_mq_stop_hw_queues(nvmeq->dev->ctrl.admin_q);

	irq_set_affinity_hint(vector, NULL);
	free_irq(vector, nvmeq);

	return 0;
}

static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
{
	struct nvme_queue *nvmeq = dev->queues[0];

	if (!nvmeq)
		return;
	if (nvme_suspend_queue(nvmeq))
		return;

	if (shutdown)
		nvme_shutdown_ctrl(&dev->ctrl);
	else
		nvme_disable_ctrl(&dev->ctrl, lo_hi_readq(
						dev->bar + NVME_REG_CAP));

	spin_lock_irq(&nvmeq->q_lock);
	nvme_process_cq(nvmeq);
	spin_unlock_irq(&nvmeq->q_lock);
}

static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
				int entry_size)
{
	int q_depth = dev->q_depth;
	unsigned q_size_aligned = roundup(q_depth * entry_size,
					  dev->ctrl.page_size);

	if (q_size_aligned * nr_io_queues > dev->cmb_size) {
		u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
		mem_per_q = round_down(mem_per_q, dev->ctrl.page_size);
		q_depth = div_u64(mem_per_q, entry_size);

		/*
		 * Ensure the reduced q_depth is above some threshold where it
		 * would be better to map queues in system memory with the
		 * original depth
		 */
		if (q_depth < 64)
			return -ENOMEM;
	}

	return q_depth;
}

static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
				int qid, int depth)
{
	if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
		unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
						      dev->ctrl.page_size);
		nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset;
		nvmeq->sq_cmds_io = dev->cmb + offset;
	} else {
		nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
					&nvmeq->sq_dma_addr, GFP_KERNEL);
		if (!nvmeq->sq_cmds)
			return -ENOMEM;
	}

	return 0;
}

static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
							int depth)
{
	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL);
	if (!nvmeq)
		return NULL;

	nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
					  &nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;

	if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth))
		goto free_cqdma;

	nvmeq->q_dmadev = dev->dev;
	nvmeq->dev = dev;
	snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
			dev->ctrl.instance, qid);
	spin_lock_init(&nvmeq->q_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->q_depth = depth;
	nvmeq->qid = qid;
	nvmeq->cq_vector = -1;
	dev->queues[qid] = nvmeq;
	dev->queue_count++;

	return nvmeq;

 free_cqdma:
	dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
							nvmeq->cq_dma_addr);
 free_nvmeq:
	kfree(nvmeq);
	return NULL;
}

static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
							const char *name)
{
	if (use_threaded_interrupts)
		return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
					nvme_irq_check, nvme_irq, IRQF_SHARED,
					name, nvmeq);
	return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
				IRQF_SHARED, name, nvmeq);
}

static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
	struct nvme_dev *dev = nvmeq->dev;

	spin_lock_irq(&nvmeq->q_lock);
	nvmeq->sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
	dev->online_queues++;
	spin_unlock_irq(&nvmeq->q_lock);
}

static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
{
	struct nvme_dev *dev = nvmeq->dev;
	int result;

	nvmeq->cq_vector = qid - 1;
	result = adapter_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		return result;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
	if (result < 0)
		goto release_sq;

	nvme_init_queue(nvmeq, qid);
	return result;

 release_sq:
	adapter_delete_sq(dev, qid);
 release_cq:
	adapter_delete_cq(dev, qid);
	return result;
}

static struct blk_mq_ops nvme_mq_admin_ops = {
	.queue_rq	= nvme_queue_rq,
	.complete	= nvme_complete_rq,
	.map_queue	= blk_mq_map_queue,
	.init_hctx	= nvme_admin_init_hctx,
	.exit_hctx      = nvme_admin_exit_hctx,
	.init_request	= nvme_admin_init_request,
	.timeout	= nvme_timeout,
};

static struct blk_mq_ops nvme_mq_ops = {
	.queue_rq	= nvme_queue_rq,
	.complete	= nvme_complete_rq,
	.map_queue	= blk_mq_map_queue,
	.init_hctx	= nvme_init_hctx,
	.init_request	= nvme_init_request,
	.timeout	= nvme_timeout,
	.poll		= nvme_poll,
};

static void nvme_dev_remove_admin(struct nvme_dev *dev)
{
	if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
		/*
		 * If the controller was reset during removal, it's possible
		 * user requests may be waiting on a stopped queue. Start the
		 * queue to flush these to completion.
		 */
		blk_mq_start_stopped_hw_queues(dev->ctrl.admin_q, true);
		blk_cleanup_queue(dev->ctrl.admin_q);
		blk_mq_free_tag_set(&dev->admin_tagset);
	}
}

static int nvme_alloc_admin_tags(struct nvme_dev *dev)
{
	if (!dev->ctrl.admin_q) {
		dev->admin_tagset.ops = &nvme_mq_admin_ops;
		dev->admin_tagset.nr_hw_queues = 1;

		/*
		 * Subtract one to leave an empty queue entry for 'Full Queue'
		 * condition. See NVM-Express 1.2 specification, section 4.1.2.
		 */
		dev->admin_tagset.queue_depth = NVME_AQ_BLKMQ_DEPTH - 1;
		dev->admin_tagset.timeout = ADMIN_TIMEOUT;
		dev->admin_tagset.numa_node = dev_to_node(dev->dev);
		dev->admin_tagset.cmd_size = nvme_cmd_size(dev);
		dev->admin_tagset.driver_data = dev;

		if (blk_mq_alloc_tag_set(&dev->admin_tagset))
			return -ENOMEM;

		dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
		if (IS_ERR(dev->ctrl.admin_q)) {
			blk_mq_free_tag_set(&dev->admin_tagset);
			return -ENOMEM;
		}
		if (!blk_get_queue(dev->ctrl.admin_q)) {
			nvme_dev_remove_admin(dev);
			dev->ctrl.admin_q = NULL;
			return -ENODEV;
		}
	} else
		blk_mq_start_stopped_hw_queues(dev->ctrl.admin_q, true);

	return 0;
}

static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
	struct nvme_queue *nvmeq;

	dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1) ?
						NVME_CAP_NSSRC(cap) : 0;

	if (dev->subsystem &&
	    (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO))
		writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS);

	result = nvme_disable_ctrl(&dev->ctrl, cap);
	if (result < 0)
		return result;

	nvmeq = dev->queues[0];
	if (!nvmeq) {
		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
		if (!nvmeq)
			return -ENOMEM;
	}

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	writel(aqa, dev->bar + NVME_REG_AQA);
	lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
	lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);

	result = nvme_enable_ctrl(&dev->ctrl, cap);
	if (result)
		goto free_nvmeq;

	nvmeq->cq_vector = 0;
	result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
	if (result) {
		nvmeq->cq_vector = -1;
		goto free_nvmeq;
	}

	return result;

 free_nvmeq:
	nvme_free_queues(dev, 0);
	return result;
}

1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315
static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
{

	/* If true, indicates loss of adapter communication, possibly by a
	 * NVMe Subsystem reset.
	 */
	bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);

	/* If there is a reset ongoing, we shouldn't reset again. */
	if (work_busy(&dev->reset_work))
		return false;

	/* We shouldn't reset unless the controller is on fatal error state
	 * _or_ if we lost the communication with it.
	 */
	if (!(csts & NVME_CSTS_CFS) && !nssro)
		return false;

	/* If PCI error recovery process is happening, we cannot reset or
	 * the recovery mechanism will surely fail.
	 */
	if (pci_channel_offline(to_pci_dev(dev->dev)))
		return false;

	return true;
}

1316
static void nvme_watchdog_timer(unsigned long data)
1317
{
1318 1319
	struct nvme_dev *dev = (struct nvme_dev *)data;
	u32 csts = readl(dev->bar + NVME_REG_CSTS);
1320

1321 1322 1323
	/* Skip controllers under certain specific conditions. */
	if (nvme_should_reset(dev, csts)) {
		if (queue_work(nvme_workq, &dev->reset_work))
1324 1325 1326 1327
			dev_warn(dev->dev,
				"Failed status: 0x%x, reset controller.\n",
				csts);
		return;
1328
	}
1329 1330

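	/* Re-arm: poll CSTS roughly once per second. */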
	mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));
}

static int nvme_create_io_queues(struct nvme_dev *dev)
{
	unsigned i, max;
	int ret = 0;

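	/*
	 * First allocate any missing queue structures, then bring each
	 * allocated-but-offline queue online with Create SQ/CQ commands.
	 */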
	for (i = dev->queue_count; i <= dev->max_qid; i++) {
		if (!nvme_alloc_queue(dev, i, dev->q_depth)) {
			ret = -ENOMEM;
			break;
		}
	}

	max = min(dev->max_qid, dev->queue_count - 1);
	for (i = dev->online_queues; i <= max; i++) {
		ret = nvme_create_queue(dev->queues[i], i);
		if (ret) {
			nvme_free_queues(dev, i);
			break;
		}
	}

	/*
	 * Ignore failing Create SQ/CQ commands, we can continue with fewer
	 * than the desired amount of queues, and even a controller without
	 * I/O queues can still be used to issue admin commands.  This might
	 * be useful to upgrade a buggy firmware for example.
	 */
	return ret >= 0 ? 0 : ret;
}

static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
{
	u64 szu, size, offset;
	u32 cmbloc;
	resource_size_t bar_size;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	void __iomem *cmb;
	dma_addr_t dma_addr;

	if (!use_cmb_sqes)
		return NULL;

	dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
	if (!(NVME_CMB_SZ(dev->cmbsz)))
		return NULL;

	cmbloc = readl(dev->bar + NVME_REG_CMBLOC);

	szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
	size = szu * NVME_CMB_SZ(dev->cmbsz);
	offset = szu * NVME_CMB_OFST(cmbloc);
	bar_size = pci_resource_len(pdev, NVME_CMB_BIR(cmbloc));

	if (offset > bar_size)
		return NULL;

	/*
	 * Controllers may support a CMB size larger than their BAR,
	 * for example, due to being behind a bridge. Reduce the CMB to
	 * the reported size of the BAR
	 */
	if (size > bar_size - offset)
		size = bar_size - offset;

	dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(cmbloc)) + offset;
	cmb = ioremap_wc(dma_addr, size);
	if (!cmb)
		return NULL;

	dev->cmb_dma_addr = dma_addr;
	dev->cmb_size = size;
	return cmb;
}

static inline void nvme_release_cmb(struct nvme_dev *dev)
{
	if (dev->cmb) {
		iounmap(dev->cmb);
		dev->cmb = NULL;
	}
}

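/*
 * BAR layout: 4k of registers, then one (submission, completion) doorbell
 * pair per queue; each doorbell is 4 bytes wide, spaced by the stride the
 * controller reports in CAP.DSTRD.
 */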
static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
{
	return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
}

static int nvme_setup_io_queues(struct nvme_dev *dev)
{
	struct nvme_queue *adminq = dev->queues[0];
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	int result, i, vecs, nr_io_queues, size;

	nr_io_queues = num_possible_cpus();
	result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
	if (result < 0)
		return result;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count.  We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be the only way to fix
	 * them up.
	 */
	if (result > 0) {
		dev_err(dev->ctrl.device,
			"Could not set queue count (%d)\n", result);
		return 0;
	}

	if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
		result = nvme_cmb_qdepth(dev, nr_io_queues,
				sizeof(struct nvme_command));
		if (result > 0)
			dev->q_depth = result;
		else
			nvme_release_cmb(dev);
	}

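	/*
	 * If the doorbell region for this many queues no longer fits in the
	 * mapped BAR, remap it, shrinking the queue count until the ioremap
	 * succeeds.
	 */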
	size = db_bar_size(dev, nr_io_queues);
	if (size > 8192) {
		iounmap(dev->bar);
		do {
			dev->bar = ioremap(pci_resource_start(pdev, 0), size);
			if (dev->bar)
				break;
			if (!--nr_io_queues)
				return -ENOMEM;
			size = db_bar_size(dev, nr_io_queues);
		} while (1);
		dev->dbs = dev->bar + 4096;
		adminq->q_db = dev->dbs;
	}

	/* Deregister the admin queue's interrupt */
	free_irq(dev->entry[0].vector, adminq);

	/*
	 * If we enabled MSI-X early because INTx is not supported, disable it
	 * again before setting up the full range we need.
	 */
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
	else if (pdev->msix_enabled)
		pci_disable_msix(pdev);

	for (i = 0; i < nr_io_queues; i++)
		dev->entry[i].entry = i;
	vecs = pci_enable_msix_range(pdev, dev->entry, 1, nr_io_queues);
	if (vecs < 0) {
		vecs = pci_enable_msi_range(pdev, 1, min(nr_io_queues, 32));
		if (vecs < 0) {
			vecs = 1;
		} else {
			for (i = 0; i < vecs; i++)
				dev->entry[i].vector = i + pdev->irq;
		}
	}

	/*
	 * Should investigate if there's a performance win from allocating
	 * more queues than interrupt vectors; it might allow the submission
	 * path to scale better, even if the receive path is limited by the
	 * number of interrupts.
	 */
	nr_io_queues = vecs;
	dev->max_qid = nr_io_queues;

	result = queue_request_irq(dev, adminq, adminq->irqname);
	if (result) {
		adminq->cq_vector = -1;
		goto free_queues;
	}
	return nvme_create_io_queues(dev);

 free_queues:
	nvme_free_queues(dev, 1);
	return result;
}

static void nvme_pci_post_scan(struct nvme_ctrl *ctrl)
{
	struct nvme_dev *dev = to_nvme_dev(ctrl);
	struct nvme_queue *nvmeq;
	int i;

	for (i = 0; i < dev->online_queues; i++) {
		nvmeq = dev->queues[i];

		if (!nvmeq->tags || !(*nvmeq->tags))
			continue;

		irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
					blk_mq_tags_cpumask(*nvmeq->tags));
	}
}

static void nvme_del_queue_end(struct request *req, int error)
{
	struct nvme_queue *nvmeq = req->end_io_data;

	blk_mq_free_request(req);
	complete(&nvmeq->dev->ioq_wait);
}

static void nvme_del_cq_end(struct request *req, int error)
{
	struct nvme_queue *nvmeq = req->end_io_data;

	if (!error) {
		unsigned long flags;

		/*
		 * We might be called with the AQ q_lock held
		 * and the I/O queue q_lock should always
		 * nest inside the AQ one.
		 */
		spin_lock_irqsave_nested(&nvmeq->q_lock, flags,
					SINGLE_DEPTH_NESTING);
		nvme_process_cq(nvmeq);
		spin_unlock_irqrestore(&nvmeq->q_lock, flags);
	}

	nvme_del_queue_end(req, error);
}

static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
{
	struct request_queue *q = nvmeq->dev->ctrl.admin_q;
	struct request *req;
	struct nvme_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.delete_queue.opcode = opcode;
	cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);

	req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = ADMIN_TIMEOUT;
	req->end_io_data = nvmeq;

	blk_execute_rq_nowait(q, NULL, req, false,
			opcode == nvme_admin_delete_cq ?
				nvme_del_cq_end : nvme_del_queue_end);
	return 0;
}

static void nvme_disable_io_queues(struct nvme_dev *dev)
{
	int pass;
	unsigned long timeout;
	u8 opcode = nvme_admin_delete_sq;

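	/*
	 * Two passes: first delete all submission queues, then all completion
	 * queues.  Deletions are issued asynchronously and each batch is
	 * waited for via ioq_wait.
	 */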
	for (pass = 0; pass < 2; pass++) {
		int sent = 0, i = dev->queue_count - 1;

		reinit_completion(&dev->ioq_wait);
 retry:
		timeout = ADMIN_TIMEOUT;
		for (; i > 0; i--) {
			struct nvme_queue *nvmeq = dev->queues[i];

			if (!pass)
				nvme_suspend_queue(nvmeq);
			if (nvme_delete_queue(nvmeq, opcode))
				break;
			++sent;
		}
		while (sent--) {
			timeout = wait_for_completion_io_timeout(&dev->ioq_wait, timeout);
			if (timeout == 0)
				return;
			if (i)
				goto retry;
		}
		opcode = nvme_admin_delete_cq;
	}
}

/*
 * Return: error value if an error occurred setting up the queues or calling
 * Identify Device.  0 if these succeeded, even if adding some of the
 * namespaces failed.  At the moment, these failures are silent.  TBD which
 * failures should be reported.
 */
static int nvme_dev_add(struct nvme_dev *dev)
{
	if (!dev->ctrl.tagset) {
		dev->tagset.ops = &nvme_mq_ops;
		dev->tagset.nr_hw_queues = dev->online_queues - 1;
		dev->tagset.timeout = NVME_IO_TIMEOUT;
		dev->tagset.numa_node = dev_to_node(dev->dev);
		dev->tagset.queue_depth =
				min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
		dev->tagset.cmd_size = nvme_cmd_size(dev);
		dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
		dev->tagset.driver_data = dev;

		if (blk_mq_alloc_tag_set(&dev->tagset))
			return 0;
		dev->ctrl.tagset = &dev->tagset;
	} else {
		blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);

		/* Free previously allocated queues that are no longer usable */
		nvme_free_queues(dev, dev->online_queues);
	}

	return 0;
}

static int nvme_pci_enable(struct nvme_dev *dev)
{
	u64 cap;
	int result = -ENOMEM;
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pci_enable_device_mem(pdev))
		return result;

	pci_set_master(pdev);

	if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32)))
		goto disable;

	if (readl(dev->bar + NVME_REG_CSTS) == -1) {
		result = -ENODEV;
		goto disable;
	}

	/*
	 * Some devices and/or platforms don't advertise or work with INTx
	 * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
	 * adjust this later.
	 */
	if (pci_enable_msix(pdev, dev->entry, 1)) {
		pci_enable_msi(pdev);
		dev->entry[0].vector = pdev->irq;
	}

	if (!dev->entry[0].vector) {
		result = -ENODEV;
		goto disable;
	}

	cap = lo_hi_readq(dev->bar + NVME_REG_CAP);

	dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
	dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
	dev->dbs = dev->bar + 4096;

	/*
	 * Temporary fix for the Apple controller found in the MacBook8,1 and
	 * some MacBook7,1 to avoid controller resets and data loss.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
		dev->q_depth = 2;
		dev_warn(dev->dev, "detected Apple NVMe controller, set "
			"queue depth=%u to work around controller resets\n",
			dev->q_depth);
	}

	if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2))
		dev->cmb = nvme_map_cmb(dev);

	pci_enable_pcie_error_reporting(pdev);
	pci_save_state(pdev);
	return 0;

 disable:
	pci_disable_device(pdev);
	return result;
}

static void nvme_dev_unmap(struct nvme_dev *dev)
{
	if (dev->bar)
		iounmap(dev->bar);
	pci_release_regions(to_pci_dev(dev->dev));
}

static void nvme_pci_disable(struct nvme_dev *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
	else if (pdev->msix_enabled)
		pci_disable_msix(pdev);

	if (pci_is_enabled(pdev)) {
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
	}
}

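/*
 * Quiesce and shut down the controller.  If the device is unresponsive or
 * reports a fatal status (CFS set or RDY clear), the queues are simply
 * suspended; otherwise the I/O queues are deleted and the admin queue is
 * shut down cleanly.  Any requests still in flight are then cancelled.
 */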
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
{
	int i;
	u32 csts = -1;

	del_timer_sync(&dev->watchdog_timer);

	mutex_lock(&dev->shutdown_lock);
	if (pci_is_enabled(to_pci_dev(dev->dev))) {
		nvme_stop_queues(&dev->ctrl);
		csts = readl(dev->bar + NVME_REG_CSTS);
	}
	if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
		for (i = dev->queue_count - 1; i >= 0; i--) {
			struct nvme_queue *nvmeq = dev->queues[i];
			nvme_suspend_queue(nvmeq);
		}
	} else {
		nvme_disable_io_queues(dev);
		nvme_disable_admin_queue(dev, shutdown);
	}
	nvme_pci_disable(dev);

	blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_io, dev);
	blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_io, dev);
	mutex_unlock(&dev->shutdown_lock);
}

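/*
 * DMA pools for PRP lists: a page-sized pool for large transfers and a
 * 256-byte pool for small ones.
 */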
static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
	dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
						PAGE_SIZE, PAGE_SIZE, 0);
	if (!dev->prp_page_pool)
		return -ENOMEM;

	/* Optimisation for I/Os between 4k and 128k */
	dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
						256, 256, 0);
	if (!dev->prp_small_pool) {
		dma_pool_destroy(dev->prp_page_pool);
		return -ENOMEM;
	}
	return 0;
}

static void nvme_release_prp_pools(struct nvme_dev *dev)
{
	dma_pool_destroy(dev->prp_page_pool);
	dma_pool_destroy(dev->prp_small_pool);
}

static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
{
	struct nvme_dev *dev = to_nvme_dev(ctrl);

	put_device(dev->dev);
	if (dev->tagset.tags)
		blk_mq_free_tag_set(&dev->tagset);
	if (dev->ctrl.admin_q)
		blk_put_queue(dev->ctrl.admin_q);
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
}

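/*
 * Disable a controller that failed to initialize and hand final teardown
 * off to nvme_remove_dead_ctrl_work().  The extra kref taken here is
 * dropped by that work item, or immediately if the work was already queued.
 */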
static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
{
	dev_warn(dev->ctrl.device, "Removing after probe failure status: %d\n", status);

	kref_get(&dev->ctrl.kref);
	nvme_dev_disable(dev, false);
	if (!schedule_work(&dev->remove_work))
		nvme_put_ctrl(&dev->ctrl);
}

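/*
 * (Re)initialize the controller from scratch: shut it down if it is live,
 * then bring up the PCI layer, admin queue, controller identification and
 * I/O queues, and finally mark the controller live and kick off namespace
 * scanning.  Any failure lands in nvme_remove_dead_ctrl().
 */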
static void nvme_reset_work(struct work_struct *work)
{
	struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
	int result = -ENODEV;

	if (WARN_ON(dev->ctrl.state == NVME_CTRL_RESETTING))
		goto out;

	/*
	 * If we're called to reset a live controller first shut it down before
	 * moving on.
	 */
	if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
		nvme_dev_disable(dev, false);

	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
		goto out;

	result = nvme_pci_enable(dev);
	if (result)
		goto out;

	result = nvme_configure_admin_queue(dev);
	if (result)
		goto out;

	nvme_init_queue(dev->queues[0], 0);
	result = nvme_alloc_admin_tags(dev);
	if (result)
		goto out;

	result = nvme_init_identify(&dev->ctrl);
	if (result)
		goto out;

	result = nvme_setup_io_queues(dev);
	if (result)
		goto out;

	/*
	 * A controller that can not execute IO typically requires user
	 * intervention to correct. For such degraded controllers, the driver
	 * should not submit commands the user did not request, so skip
	 * registering for asynchronous event notification on this condition.
	 */
	if (dev->online_queues > 1) {
		dev->ctrl.event_limit = NVME_NR_AEN_COMMANDS;
		queue_work(nvme_workq, &dev->async_work);
	}

	mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));

	/*
	 * Keep the controller around but remove all namespaces if we don't have
	 * any working I/O queue.
	 */
	if (dev->online_queues < 2) {
		dev_warn(dev->ctrl.device, "IO queues not created\n");
		nvme_kill_queues(&dev->ctrl);
		nvme_remove_namespaces(&dev->ctrl);
	} else {
		nvme_start_queues(&dev->ctrl);
		nvme_dev_add(dev);
	}

	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
		dev_warn(dev->ctrl.device, "failed to mark controller live\n");
		goto out;
	}

	if (dev->online_queues > 1)
		nvme_queue_scan(&dev->ctrl);
	return;

 out:
	nvme_remove_dead_ctrl(dev, result);
}

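/*
 * Final teardown for a dead controller: kill the block queues and detach
 * the PCI device, then drop the reference taken in nvme_remove_dead_ctrl().
 */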
static void nvme_remove_dead_ctrl_work(struct work_struct *work)
{
	struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	nvme_kill_queues(&dev->ctrl);
	if (pci_get_drvdata(pdev))
		pci_stop_and_remove_bus_device_locked(pdev);
	nvme_put_ctrl(&dev->ctrl);
}

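/*
 * Synchronous reset: queues reset_work and waits for it to finish.  Fails
 * with -EBUSY if a reset is already in flight, or -ENODEV if the admin
 * queue is gone.
 */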
static int nvme_reset(struct nvme_dev *dev)
{
	if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
		return -ENODEV;

	if (!queue_work(nvme_workq, &dev->reset_work))
		return -EBUSY;

	flush_work(&dev->reset_work);
	return 0;
}

static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
	*val = readl(to_nvme_dev(ctrl)->bar + off);
	return 0;
}

static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{
	writel(val, to_nvme_dev(ctrl)->bar + off);
	return 0;
}

static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
	*val = readq(to_nvme_dev(ctrl)->bar + off);
	return 0;
}

static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
{
	return nvme_reset(to_nvme_dev(ctrl));
}

static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
	.module			= THIS_MODULE,
	.reg_read32		= nvme_pci_reg_read32,
	.reg_write32		= nvme_pci_reg_write32,
	.reg_read64		= nvme_pci_reg_read64,
	.reset_ctrl		= nvme_pci_reset_ctrl,
	.free_ctrl		= nvme_pci_free_ctrl,
	.post_scan		= nvme_pci_post_scan,
};

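/*
 * Claim the memory BARs and map the first 8K of BAR 0, which covers the
 * controller registers and the admin queue doorbells.
 */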
static int nvme_dev_map(struct nvme_dev *dev)
{
	int bars;
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (!bars)
		return -ENODEV;
	if (pci_request_selected_regions(pdev, bars, "nvme"))
		return -ENODEV;

	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
	if (!dev->bar)
		goto release;

	return 0;
 release:
	pci_release_regions(pdev);
	return -ENODEV;
}

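/*
 * Probe allocates everything that does not require talking to the
 * controller (per-CPU queue and MSI-X entry arrays on the local NUMA node,
 * PRP pools, work items), registers the nvme_ctrl, and defers the actual
 * controller bring-up to reset_work.
 */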
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int node, result = -ENOMEM;
	struct nvme_dev *dev;

	node = dev_to_node(&pdev->dev);
	if (node == NUMA_NO_NODE)
		set_dev_node(&pdev->dev, 0);

	dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
	if (!dev)
		return -ENOMEM;
	dev->entry = kzalloc_node(num_possible_cpus() * sizeof(*dev->entry),
							GFP_KERNEL, node);
	if (!dev->entry)
		goto free;
	dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *),
							GFP_KERNEL, node);
	if (!dev->queues)
		goto free;

	dev->dev = get_device(&pdev->dev);
	pci_set_drvdata(pdev, dev);

	result = nvme_dev_map(dev);
	if (result)
		goto free;

	INIT_WORK(&dev->reset_work, nvme_reset_work);
	INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
	INIT_WORK(&dev->async_work, nvme_async_event_work);
	setup_timer(&dev->watchdog_timer, nvme_watchdog_timer,
		(unsigned long)dev);
	mutex_init(&dev->shutdown_lock);
	init_completion(&dev->ioq_wait);

	result = nvme_setup_prp_pools(dev);
	if (result)
		goto put_pci;

	result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
			id->driver_data);
	if (result)
		goto release_pools;

	dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));

	queue_work(nvme_workq, &dev->reset_work);
	return 0;

 release_pools:
	nvme_release_prp_pools(dev);
 put_pci:
	put_device(dev->dev);
	nvme_dev_unmap(dev);
 free:
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
	return result;
}

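/*
 * Called around a PCI function-level reset: quiesce the controller before
 * the reset and schedule a re-initialization afterwards.
 */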
static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	if (prepare)
		nvme_dev_disable(dev, false);
	else
		queue_work(nvme_workq, &dev->reset_work);
}

static void nvme_shutdown(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	nvme_dev_disable(dev, true);
}

/*
 * The driver's remove may be called on a device in a partially initialized
 * state. This function must not have any dependencies on the device state in
 * order to proceed.
 */
static void nvme_remove(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	del_timer_sync(&dev->watchdog_timer);

	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);

	pci_set_drvdata(pdev, NULL);
	flush_work(&dev->async_work);
	nvme_uninit_ctrl(&dev->ctrl);
	nvme_dev_disable(dev, true);
	flush_work(&dev->reset_work);
	nvme_dev_remove_admin(dev);
	nvme_free_queues(dev, 0);
	nvme_release_cmb(dev);
	nvme_release_prp_pools(dev);
	nvme_dev_unmap(dev);
	nvme_put_ctrl(&dev->ctrl);
}

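/*
 * Power management: suspend does a full controller shutdown; resume
 * re-initializes through the reset path.
 */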
#ifdef CONFIG_PM_SLEEP
static int nvme_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct nvme_dev *ndev = pci_get_drvdata(pdev);

	nvme_dev_disable(ndev, true);
	return 0;
}

static int nvme_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct nvme_dev *ndev = pci_get_drvdata(pdev);

	queue_work(nvme_workq, &ndev->reset_work);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);

static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	/*
	 * A frozen channel requires a reset. When detected, this method will
	 * shutdown the controller to quiesce. The controller will be restarted
	 * after the slot reset through driver's slot_reset callback.
	 */
	dev_warn(dev->ctrl.device, "error detected: state:%d\n", state);
	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		nvme_dev_disable(dev, false);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		return PCI_ERS_RESULT_DISCONNECT;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	dev_info(dev->ctrl.device, "restart after slot reset\n");
	pci_restore_state(pdev);
	queue_work(nvme_workq, &dev->reset_work);
	return PCI_ERS_RESULT_RECOVERED;
}

static void nvme_error_resume(struct pci_dev *pdev)
{
	pci_cleanup_aer_uncorrect_error_status(pdev);
}

static const struct pci_error_handlers nvme_err_handler = {
	.error_detected	= nvme_error_detected,
	.slot_reset	= nvme_slot_reset,
	.resume		= nvme_error_resume,
	.reset_notify	= nvme_reset_notify,
};

/* Move to pci_ids.h later */
#define PCI_CLASS_STORAGE_EXPRESS	0x010802

static const struct pci_device_id nvme_id_table[] = {
	{ PCI_VDEVICE(INTEL, 0x0953),
		.driver_data = NVME_QUIRK_STRIPE_SIZE |
				NVME_QUIRK_DISCARD_ZEROES, },
	{ PCI_VDEVICE(INTEL, 0x5845),	/* Qemu emulated controller */
		.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);

static struct pci_driver nvme_driver = {
	.name		= "nvme",
	.id_table	= nvme_id_table,
	.probe		= nvme_probe,
	.remove		= nvme_remove,
	.shutdown	= nvme_shutdown,
	.driver		= {
		.pm	= &nvme_dev_pm_ops,
	},
	.err_handler	= &nvme_err_handler,
};

static int __init nvme_init(void)
{
	int result;

	nvme_workq = alloc_workqueue("nvme", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
	if (!nvme_workq)
		return -ENOMEM;

	result = pci_register_driver(&nvme_driver);
	if (result)
		destroy_workqueue(nvme_workq);
	return result;
}

static void __exit nvme_exit(void)
{
	pci_unregister_driver(&nvme_driver);
	destroy_workqueue(nvme_workq);
	_nvme_check_size();
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_init);
module_exit(nvme_exit);