pci.c 74.8 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0
M
Matthew Wilcox 已提交
2 3
/*
 * NVM Express device driver
4
 * Copyright (c) 2011-2014, Intel Corporation.
M
Matthew Wilcox 已提交
5 6
 */

K
Keith Busch 已提交
7
#include <linux/aer.h>
8
#include <linux/async.h>
M
Matthew Wilcox 已提交
9
#include <linux/blkdev.h>
M
Matias Bjørling 已提交
10
#include <linux/blk-mq.h>
11
#include <linux/blk-mq-pci.h>
12
#include <linux/dmi.h>
M
Matthew Wilcox 已提交
13 14 15 16 17
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
18
#include <linux/mutex.h>
19
#include <linux/once.h>
M
Matthew Wilcox 已提交
20
#include <linux/pci.h>
K
Keith Busch 已提交
21
#include <linux/t10-pi.h>
M
Matthew Wilcox 已提交
22
#include <linux/types.h>
23
#include <linux/io-64-nonatomic-lo-hi.h>
24
#include <linux/sed-opal.h>
25
#include <linux/pci-p2pdma.h>
26

Y
yupeng 已提交
27
#include "trace.h"
28 29
#include "nvme.h"

M
Matthew Wilcox 已提交
30 31
#define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
32

C
Chaitanya Kulkarni 已提交
33
#define SGES_PER_PAGE	(PAGE_SIZE / sizeof(struct nvme_sgl_desc))
34

35 36 37 38 39 40 41
/*
 * These can be higher, but we need to ensure that any command doesn't
 * require an sg allocation that needs more than a page of data.
 */
#define NVME_MAX_KB_SZ	4096
#define NVME_MAX_SEGS	127

42 43 44
static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

45
static bool use_cmb_sqes = true;
46
module_param(use_cmb_sqes, bool, 0444);
47 48
MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");

49 50 51 52
static unsigned int max_host_mem_size_mb = 128;
module_param(max_host_mem_size_mb, uint, 0444);
MODULE_PARM_DESC(max_host_mem_size_mb,
	"Maximum Host Memory Buffer (HMB) size per controller (in MiB)");
53

C
Chaitanya Kulkarni 已提交
54 55 56 57 58 59
static unsigned int sgl_threshold = SZ_32K;
module_param(sgl_threshold, uint, 0644);
MODULE_PARM_DESC(sgl_threshold,
		"Use SGLs when average request segment size is larger or equal to "
		"this size. Use 0 to disable SGLs.");

60 61 62 63 64 65 66 67 68 69
static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops io_queue_depth_ops = {
	.set = io_queue_depth_set,
	.get = param_get_int,
};

static int io_queue_depth = 1024;
module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2");

70 71 72 73 74 75 76 77 78 79 80 81
static int queue_count_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops queue_count_ops = {
	.set = queue_count_set,
	.get = param_get_int,
};

static int write_queues;
module_param_cb(write_queues, &queue_count_ops, &write_queues, 0644);
MODULE_PARM_DESC(write_queues,
	"Number of queues to use for writes. If not set, reads and writes "
	"will share a queue set.");

J
Jens Axboe 已提交
82
static int poll_queues = 0;
J
Jens Axboe 已提交
83 84 85
module_param_cb(poll_queues, &queue_count_ops, &poll_queues, 0644);
MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");

86 87
struct nvme_dev;
struct nvme_queue;
88

89
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
90
static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode);
91

92 93 94 95
/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
96
	struct nvme_queue *queues;
97 98 99 100 101 102 103 104
	struct blk_mq_tag_set tagset;
	struct blk_mq_tag_set admin_tagset;
	u32 __iomem *dbs;
	struct device *dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	unsigned online_queues;
	unsigned max_qid;
105
	unsigned io_queues[HCTX_MAX_TYPES];
106
	unsigned int num_vecs;
107 108 109
	int q_depth;
	u32 db_stride;
	void __iomem *bar;
110
	unsigned long bar_mapped_size;
111
	struct work_struct remove_work;
112
	struct mutex shutdown_lock;
113 114
	bool subsystem;
	u64 cmb_size;
115
	bool cmb_use_sqes;
116
	u32 cmbsz;
117
	u32 cmbloc;
118
	struct nvme_ctrl ctrl;
119

120 121
	mempool_t *iod_mempool;

122
	/* shadow doorbell buffer support: */
123 124 125 126
	u32 *dbbuf_dbs;
	dma_addr_t dbbuf_dbs_dma_addr;
	u32 *dbbuf_eis;
	dma_addr_t dbbuf_eis_dma_addr;
127 128 129 130

	/* host memory buffer support: */
	u64 host_mem_size;
	u32 nr_host_mem_descs;
131
	dma_addr_t host_mem_descs_dma;
132 133
	struct nvme_host_mem_buf_desc *host_mem_descs;
	void **host_mem_desc_bufs;
K
Keith Busch 已提交
134
};
135

136 137 138 139 140 141 142 143 144 145 146
static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
{
	int n = 0, ret;

	ret = kstrtoint(val, 10, &n);
	if (ret != 0 || n < 2)
		return -EINVAL;

	return param_set_int(val, kp);
}

147 148 149 150 151
static int queue_count_set(const char *val, const struct kernel_param *kp)
{
	int n = 0, ret;

	ret = kstrtoint(val, 10, &n);
152 153
	if (ret)
		return ret;
154 155 156 157 158 159
	if (n > num_possible_cpus())
		n = num_possible_cpus();

	return param_set_int(val, kp);
}

160 161 162 163 164 165 166 167 168 169
static inline unsigned int sq_idx(unsigned int qid, u32 stride)
{
	return qid * 2 * stride;
}

static inline unsigned int cq_idx(unsigned int qid, u32 stride)
{
	return (qid * 2 + 1) * stride;
}

170 171 172 173 174
static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_dev, ctrl);
}

M
Matthew Wilcox 已提交
175 176 177 178 179
/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
M
Matthew Wilcox 已提交
180
	struct nvme_dev *dev;
181
	spinlock_t sq_lock;
M
Matthew Wilcox 已提交
182
	struct nvme_command *sq_cmds;
183 184
	 /* only used for poll queues: */
	spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
M
Matthew Wilcox 已提交
185
	volatile struct nvme_completion *cqes;
186
	struct blk_mq_tags **tags;
M
Matthew Wilcox 已提交
187 188 189 190
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	u32 __iomem *q_db;
	u16 q_depth;
191
	u16 cq_vector;
M
Matthew Wilcox 已提交
192
	u16 sq_tail;
193
	u16 last_sq_tail;
M
Matthew Wilcox 已提交
194
	u16 cq_head;
195
	u16 last_cq_head;
K
Keith Busch 已提交
196
	u16 qid;
197
	u8 cq_phase;
198 199
	unsigned long flags;
#define NVMEQ_ENABLED		0
200
#define NVMEQ_SQ_CMB		1
201
#define NVMEQ_DELETE_ERROR	2
202
#define NVMEQ_POLLED		3
203 204 205 206
	u32 *dbbuf_sq_db;
	u32 *dbbuf_cq_db;
	u32 *dbbuf_sq_ei;
	u32 *dbbuf_cq_ei;
207
	struct completion delete_done;
M
Matthew Wilcox 已提交
208 209
};

210
/*
211 212 213 214
 * The nvme_iod describes the data in an I/O.
 *
 * The sg pointer contains the list of PRP/SGL chunk allocations in addition
 * to the actual struct scatterlist.
215 216
 */
struct nvme_iod {
217
	struct nvme_request req;
C
Christoph Hellwig 已提交
218
	struct nvme_queue *nvmeq;
C
Chaitanya Kulkarni 已提交
219
	bool use_sgl;
C
Christoph Hellwig 已提交
220
	int aborted;
221 222 223
	int npages;		/* In the PRP list. 0 means small pool in use */
	int nents;		/* Used in scatterlist */
	dma_addr_t first_dma;
224
	dma_addr_t meta_dma;
C
Christoph Hellwig 已提交
225
	struct scatterlist *sg;
M
Matthew Wilcox 已提交
226 227 228 229 230 231 232 233 234 235 236 237
};

/*
 * Check we didin't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
238
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
K
Keith Busch 已提交
239
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
M
Matthew Wilcox 已提交
240
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
241 242
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
M
Matthew Wilcox 已提交
243
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
K
Keith Busch 已提交
244
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
245 246 247
	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
}

248 249
static unsigned int max_io_queues(void)
{
J
Jens Axboe 已提交
250
	return num_possible_cpus() + write_queues + poll_queues;
251 252 253 254 255 256 257 258
}

static unsigned int max_queue_count(void)
{
	/* IO queues + admin queue */
	return 1 + max_io_queues();
}

259 260
static inline unsigned int nvme_dbbuf_size(u32 stride)
{
261
	return (max_queue_count() * 8 * stride);
262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329
}

static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);

	if (dev->dbbuf_dbs)
		return 0;

	dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_dbs_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_dbs)
		return -ENOMEM;
	dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_eis_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);

	if (dev->dbbuf_dbs) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
	}
	if (dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_eis, dev->dbbuf_eis_dma_addr);
		dev->dbbuf_eis = NULL;
	}
}

static void nvme_dbbuf_init(struct nvme_dev *dev,
			    struct nvme_queue *nvmeq, int qid)
{
	if (!dev->dbbuf_dbs || !qid)
		return;

	nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
}

static void nvme_dbbuf_set(struct nvme_dev *dev)
{
	struct nvme_command c;

	if (!dev->dbbuf_dbs)
		return;

	memset(&c, 0, sizeof(c));
	c.dbbuf.opcode = nvme_admin_dbbuf;
	c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
	c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);

	if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
330
		dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356
		/* Free memory and continue on */
		nvme_dbbuf_dma_free(dev);
	}
}

static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
{
	return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
}

/* Update dbbuf and return true if an MMIO is required */
static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
					      volatile u32 *dbbuf_ei)
{
	if (dbbuf_db) {
		u16 old_value;

		/*
		 * Ensure that the queue is written before updating
		 * the doorbell in memory
		 */
		wmb();

		old_value = *dbbuf_db;
		*dbbuf_db = value;

357 358 359 360 361 362 363 364
		/*
		 * Ensure that the doorbell is updated before reading the event
		 * index from memory.  The controller needs to provide similar
		 * ordering to ensure the envent index is updated before reading
		 * the doorbell.
		 */
		mb();

365 366 367 368 369
		if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
			return false;
	}

	return true;
M
Matthew Wilcox 已提交
370 371
}

372 373 374 375 376 377 378
/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_npages(unsigned size, struct nvme_dev *dev)
{
379 380
	unsigned nprps = DIV_ROUND_UP(size + dev->ctrl.page_size,
				      dev->ctrl.page_size);
381 382 383
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}

C
Chaitanya Kulkarni 已提交
384 385 386 387 388
/*
 * Calculates the number of pages needed for the SGL segments. For example a 4k
 * page can accommodate 256 SGL descriptors.
 */
static int nvme_pci_npages_sgl(unsigned int num_seg)
389
{
C
Chaitanya Kulkarni 已提交
390
	return DIV_ROUND_UP(num_seg * sizeof(struct nvme_sgl_desc), PAGE_SIZE);
C
Christoph Hellwig 已提交
391
}
392

C
Chaitanya Kulkarni 已提交
393 394
static unsigned int nvme_pci_iod_alloc_size(struct nvme_dev *dev,
		unsigned int size, unsigned int nseg, bool use_sgl)
C
Christoph Hellwig 已提交
395
{
C
Chaitanya Kulkarni 已提交
396 397 398 399 400 401 402 403
	size_t alloc_size;

	if (use_sgl)
		alloc_size = sizeof(__le64 *) * nvme_pci_npages_sgl(nseg);
	else
		alloc_size = sizeof(__le64 *) * nvme_npages(size, dev);

	return alloc_size + sizeof(struct scatterlist) * nseg;
C
Christoph Hellwig 已提交
404
}
405

M
Matias Bjørling 已提交
406 407
static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				unsigned int hctx_idx)
408
{
M
Matias Bjørling 已提交
409
	struct nvme_dev *dev = data;
410
	struct nvme_queue *nvmeq = &dev->queues[0];
M
Matias Bjørling 已提交
411

412 413 414 415
	WARN_ON(hctx_idx != 0);
	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
	WARN_ON(nvmeq->tags);

M
Matias Bjørling 已提交
416
	hctx->driver_data = nvmeq;
417
	nvmeq->tags = &dev->admin_tagset.tags[0];
M
Matias Bjørling 已提交
418
	return 0;
419 420
}

421 422 423 424 425 426 427
static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	nvmeq->tags = NULL;
}

M
Matias Bjørling 已提交
428 429
static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int hctx_idx)
M
Matthew Wilcox 已提交
430
{
M
Matias Bjørling 已提交
431
	struct nvme_dev *dev = data;
432
	struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];
M
Matias Bjørling 已提交
433

434 435
	if (!nvmeq->tags)
		nvmeq->tags = &dev->tagset.tags[hctx_idx];
M
Matthew Wilcox 已提交
436

437
	WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
M
Matias Bjørling 已提交
438 439
	hctx->driver_data = nvmeq;
	return 0;
M
Matthew Wilcox 已提交
440 441
}

442 443
static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
		unsigned int hctx_idx, unsigned int numa_node)
M
Matthew Wilcox 已提交
444
{
445
	struct nvme_dev *dev = set->driver_data;
C
Christoph Hellwig 已提交
446
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
447
	int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
448
	struct nvme_queue *nvmeq = &dev->queues[queue_idx];
M
Matias Bjørling 已提交
449 450

	BUG_ON(!nvmeq);
C
Christoph Hellwig 已提交
451
	iod->nvmeq = nvmeq;
452 453

	nvme_req(req)->ctrl = &dev->ctrl;
M
Matias Bjørling 已提交
454 455 456
	return 0;
}

457 458 459 460 461 462 463 464 465
static int queue_irq_offset(struct nvme_dev *dev)
{
	/* if we have more than 1 vec, admin queue offsets us by 1 */
	if (dev->num_vecs > 1)
		return 1;

	return 0;
}

466 467 468
static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_dev *dev = set->driver_data;
469 470 471 472 473 474 475 476
	int i, qoff, offset;

	offset = queue_irq_offset(dev);
	for (i = 0, qoff = 0; i < set->nr_maps; i++) {
		struct blk_mq_queue_map *map = &set->map[i];

		map->nr_queues = dev->io_queues[i];
		if (!map->nr_queues) {
477
			BUG_ON(i == HCTX_TYPE_DEFAULT);
478
			continue;
479 480
		}

J
Jens Axboe 已提交
481 482 483 484
		/*
		 * The poll queue(s) doesn't have an IRQ (and hence IRQ
		 * affinity), so use the regular blk-mq cpu mapping
		 */
485
		map->queue_offset = qoff;
486
		if (i != HCTX_TYPE_POLL)
J
Jens Axboe 已提交
487 488 489
			blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
		else
			blk_mq_map_queues(map);
490 491 492 493 494
		qoff += map->nr_queues;
		offset += map->nr_queues;
	}

	return 0;
495 496
}

497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516
/*
 * Write sq tail if we are asked to, or if the next command would wrap.
 */
static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
{
	if (!write_sq) {
		u16 next_tail = nvmeq->sq_tail + 1;

		if (next_tail == nvmeq->q_depth)
			next_tail = 0;
		if (next_tail != nvmeq->last_sq_tail)
			return;
	}

	if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
			nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
		writel(nvmeq->sq_tail, nvmeq->q_db);
	nvmeq->last_sq_tail = nvmeq->sq_tail;
}

M
Matthew Wilcox 已提交
517
/**
518
 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
M
Matthew Wilcox 已提交
519 520
 * @nvmeq: The queue to use
 * @cmd: The command to send
521
 * @write_sq: whether to write to the SQ doorbell
M
Matthew Wilcox 已提交
522
 */
523 524
static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
			    bool write_sq)
M
Matthew Wilcox 已提交
525
{
526
	spin_lock(&nvmeq->sq_lock);
527
	memcpy(&nvmeq->sq_cmds[nvmeq->sq_tail], cmd, sizeof(*cmd));
528 529
	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
530 531 532 533 534 535 536 537 538 539 540
	nvme_write_sq_db(nvmeq, write_sq);
	spin_unlock(&nvmeq->sq_lock);
}

static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	spin_lock(&nvmeq->sq_lock);
	if (nvmeq->sq_tail != nvmeq->last_sq_tail)
		nvme_write_sq_db(nvmeq, true);
541
	spin_unlock(&nvmeq->sq_lock);
M
Matthew Wilcox 已提交
542 543
}

C
Chaitanya Kulkarni 已提交
544
static void **nvme_pci_iod_list(struct request *req)
M
Matthew Wilcox 已提交
545
{
C
Christoph Hellwig 已提交
546
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
C
Chaitanya Kulkarni 已提交
547
	return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
M
Matthew Wilcox 已提交
548 549
}

550 551 552
static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
553
	int nseg = blk_rq_nr_phys_segments(req);
554 555
	unsigned int avg_seg_size;

556 557 558 559
	if (nseg == 0)
		return false;

	avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
560 561 562 563 564 565 566 567 568 569

	if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
		return false;
	if (!iod->nvmeq->qid)
		return false;
	if (!sgl_threshold || avg_seg_size < sgl_threshold)
		return false;
	return true;
}

570
static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
M
Matthew Wilcox 已提交
571
{
C
Christoph Hellwig 已提交
572
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
573 574
	enum dma_data_direction dma_dir = rq_data_dir(req) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;
C
Chaitanya Kulkarni 已提交
575 576
	const int last_prp = dev->ctrl.page_size / sizeof(__le64) - 1;
	dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
577 578
	int i;

579 580 581 582 583 584 585
	if (iod->nents) {
		/* P2PDMA requests do not need to be unmapped */
		if (!is_pci_p2pdma_page(sg_page(iod->sg)))
			dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);

	}

586
	if (iod->npages == 0)
C
Chaitanya Kulkarni 已提交
587 588 589
		dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
			dma_addr);

590
	for (i = 0; i < iod->npages; i++) {
C
Chaitanya Kulkarni 已提交
591 592 593 594 595 596 597 598 599 600 601 602 603 604 605
		void *addr = nvme_pci_iod_list(req)[i];

		if (iod->use_sgl) {
			struct nvme_sgl_desc *sg_list = addr;

			next_dma_addr =
			    le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
		} else {
			__le64 *prp_list = addr;

			next_dma_addr = le64_to_cpu(prp_list[last_prp]);
		}

		dma_pool_free(dev->prp_page_pool, addr, dma_addr);
		dma_addr = next_dma_addr;
606
	}
607

608
	mempool_free(iod->sg, dev->iod_mempool);
K
Keith Busch 已提交
609 610
}

611 612 613 614 615 616 617 618 619 620 621 622 623 624
static void nvme_print_sgl(struct scatterlist *sgl, int nents)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		dma_addr_t phys = sg_phys(sg);
		pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
			"dma_address:%pad dma_length:%d\n",
			i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
			sg_dma_len(sg));
	}
}

C
Chaitanya Kulkarni 已提交
625 626
static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd)
M
Matthew Wilcox 已提交
627
{
C
Christoph Hellwig 已提交
628
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
629
	struct dma_pool *pool;
630
	int length = blk_rq_payload_bytes(req);
631
	struct scatterlist *sg = iod->sg;
M
Matthew Wilcox 已提交
632 633
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
634
	u32 page_size = dev->ctrl.page_size;
635
	int offset = dma_addr & (page_size - 1);
636
	__le64 *prp_list;
C
Chaitanya Kulkarni 已提交
637
	void **list = nvme_pci_iod_list(req);
638
	dma_addr_t prp_dma;
639
	int nprps, i;
M
Matthew Wilcox 已提交
640

641
	length -= (page_size - offset);
642 643
	if (length <= 0) {
		iod->first_dma = 0;
C
Chaitanya Kulkarni 已提交
644
		goto done;
645
	}
M
Matthew Wilcox 已提交
646

647
	dma_len -= (page_size - offset);
M
Matthew Wilcox 已提交
648
	if (dma_len) {
649
		dma_addr += (page_size - offset);
M
Matthew Wilcox 已提交
650 651 652 653 654 655
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

656
	if (length <= page_size) {
657
		iod->first_dma = dma_addr;
C
Chaitanya Kulkarni 已提交
658
		goto done;
659 660
	}

661
	nprps = DIV_ROUND_UP(length, page_size);
662 663
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
664
		iod->npages = 0;
665 666
	} else {
		pool = dev->prp_page_pool;
667
		iod->npages = 1;
668 669
	}

670
	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
671
	if (!prp_list) {
672
		iod->first_dma = dma_addr;
673
		iod->npages = -1;
674
		return BLK_STS_RESOURCE;
675
	}
676 677
	list[0] = prp_list;
	iod->first_dma = prp_dma;
678 679
	i = 0;
	for (;;) {
680
		if (i == page_size >> 3) {
681
			__le64 *old_prp_list = prp_list;
682
			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
683
			if (!prp_list)
684
				return BLK_STS_RESOURCE;
685
			list[iod->npages++] = prp_list;
686 687 688
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
689 690
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
691 692 693
		dma_len -= page_size;
		dma_addr += page_size;
		length -= page_size;
694 695 696 697
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
698 699
		if (unlikely(dma_len < 0))
			goto bad_sgl;
700 701 702
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
M
Matthew Wilcox 已提交
703 704
	}

C
Chaitanya Kulkarni 已提交
705 706 707 708
done:
	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);

709 710 711
	return BLK_STS_OK;

 bad_sgl:
712 713 714
	WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
			"Invalid SGL for payload:%d nents:%d\n",
			blk_rq_payload_bytes(req), iod->nents);
715
	return BLK_STS_IOERR;
M
Matthew Wilcox 已提交
716 717
}

C
Chaitanya Kulkarni 已提交
718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739
static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge,
		struct scatterlist *sg)
{
	sge->addr = cpu_to_le64(sg_dma_address(sg));
	sge->length = cpu_to_le32(sg_dma_len(sg));
	sge->type = NVME_SGL_FMT_DATA_DESC << 4;
}

static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
		dma_addr_t dma_addr, int entries)
{
	sge->addr = cpu_to_le64(dma_addr);
	if (entries < SGES_PER_PAGE) {
		sge->length = cpu_to_le32(entries * sizeof(*sge));
		sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
	} else {
		sge->length = cpu_to_le32(PAGE_SIZE);
		sge->type = NVME_SGL_FMT_SEG_DESC << 4;
	}
}

static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
740
		struct request *req, struct nvme_rw_command *cmd, int entries)
C
Chaitanya Kulkarni 已提交
741 742 743 744 745 746
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	struct nvme_sgl_desc *sg_list;
	struct scatterlist *sg = iod->sg;
	dma_addr_t sgl_dma;
747
	int i = 0;
C
Chaitanya Kulkarni 已提交
748 749 750 751

	/* setting the transfer type as SGL */
	cmd->flags = NVME_CMD_SGL_METABUF;

752
	if (entries == 1) {
C
Chaitanya Kulkarni 已提交
753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792
		nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
		return BLK_STS_OK;
	}

	if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
	if (!sg_list) {
		iod->npages = -1;
		return BLK_STS_RESOURCE;
	}

	nvme_pci_iod_list(req)[0] = sg_list;
	iod->first_dma = sgl_dma;

	nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);

	do {
		if (i == SGES_PER_PAGE) {
			struct nvme_sgl_desc *old_sg_desc = sg_list;
			struct nvme_sgl_desc *link = &old_sg_desc[i - 1];

			sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
			if (!sg_list)
				return BLK_STS_RESOURCE;

			i = 0;
			nvme_pci_iod_list(req)[iod->npages++] = sg_list;
			sg_list[i++] = *link;
			nvme_pci_sgl_set_seg(link, sgl_dma, entries);
		}

		nvme_pci_sgl_set_data(&sg_list[i++], sg);
		sg = sg_next(sg);
793
	} while (--entries > 0);
C
Chaitanya Kulkarni 已提交
794 795 796 797

	return BLK_STS_OK;
}

798
static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
799
		struct nvme_command *cmnd)
800
{
C
Christoph Hellwig 已提交
801
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
C
Christoph Hellwig 已提交
802 803 804
	struct request_queue *q = req->q;
	enum dma_data_direction dma_dir = rq_data_dir(req) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;
805
	blk_status_t ret = BLK_STS_IOERR;
806
	int nr_mapped;
807

808 809 810
	iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
	if (!iod->sg)
		return BLK_STS_RESOURCE;
811 812 813

	iod->use_sgl = nvme_pci_use_sgls(dev, req);

814
	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
C
Christoph Hellwig 已提交
815 816 817
	iod->nents = blk_rq_map_sg(q, req, iod->sg);
	if (!iod->nents)
		goto out;
818

819
	ret = BLK_STS_RESOURCE;
820 821 822 823 824 825 826

	if (is_pci_p2pdma_page(sg_page(iod->sg)))
		nr_mapped = pci_p2pdma_map_sg(dev->dev, iod->sg, iod->nents,
					  dma_dir);
	else
		nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
					     dma_dir,  DMA_ATTR_NO_WARN);
827
	if (!nr_mapped)
C
Christoph Hellwig 已提交
828
		goto out;
829

830
	if (iod->use_sgl)
831
		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
C
Chaitanya Kulkarni 已提交
832 833
	else
		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
834
out:
835
	if (ret != BLK_STS_OK)
836 837 838
		nvme_unmap_data(dev, req);
	return ret;
}
839

840 841 842 843
static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
M
Matthew Wilcox 已提交
844

845 846 847 848 849 850
	iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
			rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->meta_dma))
		return BLK_STS_IOERR;
	cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
	return 0;
M
Matthew Wilcox 已提交
851 852
}

853 854 855
/*
 * NOTE: ns is NULL when called on the admin queue.
 */
856
static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
M
Matias Bjørling 已提交
857
			 const struct blk_mq_queue_data *bd)
858
{
M
Matias Bjørling 已提交
859 860
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_queue *nvmeq = hctx->driver_data;
861
	struct nvme_dev *dev = nvmeq->dev;
M
Matias Bjørling 已提交
862
	struct request *req = bd->rq;
863
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
C
Christoph Hellwig 已提交
864
	struct nvme_command cmnd;
865
	blk_status_t ret;
K
Keith Busch 已提交
866

867 868 869 870
	iod->aborted = 0;
	iod->npages = -1;
	iod->nents = 0;

871 872 873 874
	/*
	 * We should not need to do this, but we're still using this to
	 * ensure we can drain requests on a dying queue.
	 */
875
	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
876 877
		return BLK_STS_IOERR;

878
	ret = nvme_setup_cmd(ns, req, &cmnd);
879
	if (ret)
C
Christoph Hellwig 已提交
880
		return ret;
M
Matias Bjørling 已提交
881

882
	if (blk_rq_nr_phys_segments(req)) {
883
		ret = nvme_map_data(dev, req, &cmnd);
884
		if (ret)
885
			goto out_free_cmd;
886
	}
M
Matias Bjørling 已提交
887

888 889 890 891 892 893
	if (blk_integrity_rq(req)) {
		ret = nvme_map_metadata(dev, req, &cmnd);
		if (ret)
			goto out_unmap_data;
	}

894
	blk_mq_start_request(req);
895
	nvme_submit_cmd(nvmeq, &cmnd, bd->last);
896
	return BLK_STS_OK;
897 898
out_unmap_data:
	nvme_unmap_data(dev, req);
899 900
out_free_cmd:
	nvme_cleanup_cmd(req);
C
Christoph Hellwig 已提交
901
	return ret;
M
Matthew Wilcox 已提交
902
}
K
Keith Busch 已提交
903

904
static void nvme_pci_complete_rq(struct request *req)
905
{
C
Christoph Hellwig 已提交
906
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
907
	struct nvme_dev *dev = iod->nvmeq->dev;
M
Matias Bjørling 已提交
908

909
	nvme_cleanup_cmd(req);
910 911 912
	if (blk_integrity_rq(req))
		dma_unmap_page(dev->dev, iod->meta_dma,
			       rq_integrity_vec(req)->bv_len, rq_data_dir(req));
913
	if (blk_rq_nr_phys_segments(req))
914
		nvme_unmap_data(dev, req);
915
	nvme_complete_rq(req);
M
Matthew Wilcox 已提交
916 917
}

918
/* We read the CQE phase first to check if the rest of the entry is valid */
919
static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
920
{
921 922
	return (le16_to_cpu(nvmeq->cqes[nvmeq->cq_head].status) & 1) ==
			nvmeq->cq_phase;
923 924
}

925
static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
M
Matthew Wilcox 已提交
926
{
927
	u16 head = nvmeq->cq_head;
928

929 930 931
	if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
					      nvmeq->dbbuf_cq_ei))
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
932
}
933

934
static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
935
{
936
	volatile struct nvme_completion *cqe = &nvmeq->cqes[idx];
937
	struct request *req;
938

939 940 941 942 943
	if (unlikely(cqe->command_id >= nvmeq->q_depth)) {
		dev_warn(nvmeq->dev->ctrl.device,
			"invalid id %d completed on queue %d\n",
			cqe->command_id, le16_to_cpu(cqe->sq_id));
		return;
M
Matthew Wilcox 已提交
944 945
	}

946 947 948 949 950 951 952
	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvmeq->qid == 0 &&
K
Keith Busch 已提交
953
			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
954 955
		nvme_complete_async_event(&nvmeq->dev->ctrl,
				cqe->status, &cqe->result);
J
Jens Axboe 已提交
956
		return;
957
	}
M
Matthew Wilcox 已提交
958

959
	req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
Y
yupeng 已提交
960
	trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
961 962
	nvme_end_request(req, cqe->status, cqe->result);
}
M
Matthew Wilcox 已提交
963

964
static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end)
M
Matthew Wilcox 已提交
965
{
966 967 968 969 970 971
	while (start != end) {
		nvme_handle_cqe(nvmeq, start);
		if (++start == nvmeq->q_depth)
			start = 0;
	}
}
972

973 974
static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
{
975
	if (nvmeq->cq_head == nvmeq->q_depth - 1) {
976 977
		nvmeq->cq_head = 0;
		nvmeq->cq_phase = !nvmeq->cq_phase;
978 979
	} else {
		nvmeq->cq_head++;
M
Matthew Wilcox 已提交
980
	}
J
Jens Axboe 已提交
981 982
}

983 984
static inline int nvme_process_cq(struct nvme_queue *nvmeq, u16 *start,
				  u16 *end, unsigned int tag)
J
Jens Axboe 已提交
985
{
986
	int found = 0;
M
Matthew Wilcox 已提交
987

988
	*start = nvmeq->cq_head;
989 990 991
	while (nvme_cqe_pending(nvmeq)) {
		if (tag == -1U || nvmeq->cqes[nvmeq->cq_head].command_id == tag)
			found++;
992
		nvme_update_cq_head(nvmeq);
993
	}
994
	*end = nvmeq->cq_head;
995

996
	if (*start != *end)
997
		nvme_ring_cq_doorbell(nvmeq);
998
	return found;
M
Matthew Wilcox 已提交
999 1000 1001
}

static irqreturn_t nvme_irq(int irq, void *data)
1002 1003
{
	struct nvme_queue *nvmeq = data;
1004
	irqreturn_t ret = IRQ_NONE;
1005 1006
	u16 start, end;

1007 1008 1009 1010 1011
	/*
	 * The rmb/wmb pair ensures we see all updates from a previous run of
	 * the irq handler, even if that was on another CPU.
	 */
	rmb();
1012 1013
	if (nvmeq->cq_head != nvmeq->last_cq_head)
		ret = IRQ_HANDLED;
1014
	nvme_process_cq(nvmeq, &start, &end, -1);
1015
	nvmeq->last_cq_head = nvmeq->cq_head;
1016
	wmb();
1017

1018 1019 1020 1021 1022 1023
	if (start != end) {
		nvme_complete_cqes(nvmeq, start, end);
		return IRQ_HANDLED;
	}

	return ret;
1024 1025 1026 1027 1028
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
1029
	if (nvme_cqe_pending(nvmeq))
1030 1031
		return IRQ_WAKE_THREAD;
	return IRQ_NONE;
1032 1033
}

1034 1035 1036 1037 1038
/*
 * Poll for completions any queue, including those not dedicated to polling.
 * Can be called from any context.
 */
static int nvme_poll_irqdisable(struct nvme_queue *nvmeq, unsigned int tag)
J
Jens Axboe 已提交
1039
{
1040
	struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
1041
	u16 start, end;
1042
	int found;
J
Jens Axboe 已提交
1043

1044 1045 1046 1047 1048
	/*
	 * For a poll queue we need to protect against the polling thread
	 * using the CQ lock.  For normal interrupt driven threads we have
	 * to disable the interrupt to avoid racing with it.
	 */
1049
	if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) {
1050
		spin_lock(&nvmeq->cq_poll_lock);
1051
		found = nvme_process_cq(nvmeq, &start, &end, tag);
1052
		spin_unlock(&nvmeq->cq_poll_lock);
1053 1054 1055
	} else {
		disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
		found = nvme_process_cq(nvmeq, &start, &end, tag);
1056
		enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
1057
	}
1058

1059
	nvme_complete_cqes(nvmeq, start, end);
1060
	return found;
J
Jens Axboe 已提交
1061 1062
}

1063
static int nvme_poll(struct blk_mq_hw_ctx *hctx)
1064 1065 1066 1067 1068 1069 1070 1071
{
	struct nvme_queue *nvmeq = hctx->driver_data;
	u16 start, end;
	bool found;

	if (!nvme_cqe_pending(nvmeq))
		return 0;

1072
	spin_lock(&nvmeq->cq_poll_lock);
1073
	found = nvme_process_cq(nvmeq, &start, &end, -1);
1074
	spin_unlock(&nvmeq->cq_poll_lock);
1075 1076 1077 1078 1079

	nvme_complete_cqes(nvmeq, start, end);
	return found;
}

1080
static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
M
Matthew Wilcox 已提交
1081
{
1082
	struct nvme_dev *dev = to_nvme_dev(ctrl);
1083
	struct nvme_queue *nvmeq = &dev->queues[0];
M
Matias Bjørling 已提交
1084
	struct nvme_command c;
M
Matthew Wilcox 已提交
1085

M
Matias Bjørling 已提交
1086 1087
	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_async_event;
1088
	c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
1089
	nvme_submit_cmd(nvmeq, &c, true);
1090 1091
}

M
Matthew Wilcox 已提交
1092
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
1093
{
M
Matthew Wilcox 已提交
1094 1095 1096 1097 1098 1099
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

1100
	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
M
Matthew Wilcox 已提交
1101 1102 1103
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
1104
		struct nvme_queue *nvmeq, s16 vector)
M
Matthew Wilcox 已提交
1105 1106
{
	struct nvme_command c;
J
Jens Axboe 已提交
1107 1108
	int flags = NVME_QUEUE_PHYS_CONTIG;

1109
	if (!test_bit(NVMEQ_POLLED, &nvmeq->flags))
J
Jens Axboe 已提交
1110
		flags |= NVME_CQ_IRQ_ENABLED;
M
Matthew Wilcox 已提交
1111

1112
	/*
M
Minwoo Im 已提交
1113
	 * Note: we (ab)use the fact that the prp fields survive if no data
1114 1115
	 * is attached to the request.
	 */
M
Matthew Wilcox 已提交
1116 1117 1118 1119 1120 1121
	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
1122
	c.create_cq.irq_vector = cpu_to_le16(vector);
M
Matthew Wilcox 已提交
1123

1124
	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
M
Matthew Wilcox 已提交
1125 1126 1127 1128 1129
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
1130
	struct nvme_ctrl *ctrl = &dev->ctrl;
M
Matthew Wilcox 已提交
1131
	struct nvme_command c;
1132
	int flags = NVME_QUEUE_PHYS_CONTIG;
M
Matthew Wilcox 已提交
1133

1134 1135 1136 1137 1138 1139 1140 1141
	/*
	 * Some drives have a bug that auto-enables WRRU if MEDIUM isn't
	 * set. Since URGENT priority is zeroes, it makes all queues
	 * URGENT.
	 */
	if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ)
		flags |= NVME_SQ_PRIO_MEDIUM;

1142
	/*
M
Minwoo Im 已提交
1143
	 * Note: we (ab)use the fact that the prp fields survive if no data
1144 1145
	 * is attached to the request.
	 */
M
Matthew Wilcox 已提交
1146 1147 1148 1149 1150 1151 1152 1153
	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

1154
	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
M
Matthew Wilcox 已提交
1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

1167
static void abort_endio(struct request *req, blk_status_t error)
1168
{
C
Christoph Hellwig 已提交
1169 1170
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = iod->nvmeq;
1171

1172 1173
	dev_warn(nvmeq->dev->ctrl.device,
		 "Abort status: 0x%x", nvme_req(req)->status);
1174 1175
	atomic_inc(&nvmeq->dev->ctrl.abort_limit);
	blk_mq_free_request(req);
1176 1177
}

K
Keith Busch 已提交
1178 1179 1180 1181 1182 1183 1184 1185
static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
{

	/* If true, indicates loss of adapter communication, possibly by a
	 * NVMe Subsystem reset.
	 */
	bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);

1186 1187 1188
	/* If there is a reset/reinit ongoing, we shouldn't reset again. */
	switch (dev->ctrl.state) {
	case NVME_CTRL_RESETTING:
1189
	case NVME_CTRL_CONNECTING:
K
Keith Busch 已提交
1190
		return false;
1191 1192 1193
	default:
		break;
	}
K
Keith Busch 已提交
1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221

	/* We shouldn't reset unless the controller is on fatal error state
	 * _or_ if we lost the communication with it.
	 */
	if (!(csts & NVME_CSTS_CFS) && !nssro)
		return false;

	return true;
}

static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
{
	/* Read a config register to help see what died. */
	u16 pci_status;
	int result;

	result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
				      &pci_status);
	if (result == PCIBIOS_SUCCESSFUL)
		dev_warn(dev->ctrl.device,
			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
			 csts, pci_status);
	else
		dev_warn(dev->ctrl.device,
			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
			 csts, result);
}

1222
static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
K
Keith Busch 已提交
1223
{
C
Christoph Hellwig 已提交
1224 1225
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = iod->nvmeq;
K
Keith Busch 已提交
1226
	struct nvme_dev *dev = nvmeq->dev;
M
Matias Bjørling 已提交
1227 1228
	struct request *abort_req;
	struct nvme_command cmd;
K
Keith Busch 已提交
1229 1230
	u32 csts = readl(dev->bar + NVME_REG_CSTS);

W
Wen Xiong 已提交
1231 1232 1233 1234 1235 1236 1237
	/* If PCI error recovery process is happening, we cannot reset or
	 * the recovery mechanism will surely fail.
	 */
	mb();
	if (pci_channel_offline(to_pci_dev(dev->dev)))
		return BLK_EH_RESET_TIMER;

K
Keith Busch 已提交
1238 1239 1240 1241 1242 1243
	/*
	 * Reset immediately if the controller is failed
	 */
	if (nvme_should_reset(dev, csts)) {
		nvme_warn_reset(dev, csts);
		nvme_dev_disable(dev, false);
1244
		nvme_reset_ctrl(&dev->ctrl);
1245
		return BLK_EH_DONE;
K
Keith Busch 已提交
1246
	}
K
Keith Busch 已提交
1247

K
Keith Busch 已提交
1248 1249 1250
	/*
	 * Did we miss an interrupt?
	 */
1251
	if (nvme_poll_irqdisable(nvmeq, req->tag)) {
K
Keith Busch 已提交
1252 1253 1254
		dev_warn(dev->ctrl.device,
			 "I/O %d QID %d timeout, completion polled\n",
			 req->tag, nvmeq->qid);
1255
		return BLK_EH_DONE;
K
Keith Busch 已提交
1256 1257
	}

1258
	/*
1259 1260 1261
	 * Shutdown immediately if controller times out while starting. The
	 * reset work will see the pci device disabled when it gets the forced
	 * cancellation error. All outstanding requests are completed on
1262
	 * shutdown, so we return BLK_EH_DONE.
1263
	 */
1264 1265 1266
	switch (dev->ctrl.state) {
	case NVME_CTRL_CONNECTING:
	case NVME_CTRL_RESETTING:
1267
		dev_warn_ratelimited(dev->ctrl.device,
1268 1269
			 "I/O %d QID %d timeout, disable controller\n",
			 req->tag, nvmeq->qid);
1270
		nvme_dev_disable(dev, false);
1271
		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
1272
		return BLK_EH_DONE;
1273 1274
	default:
		break;
K
Keith Busch 已提交
1275 1276
	}

1277 1278 1279 1280
	/*
 	 * Shutdown the controller immediately and schedule a reset if the
 	 * command was already aborted once before and still hasn't been
 	 * returned to the driver, or if this is the admin queue.
1281
	 */
C
Christoph Hellwig 已提交
1282
	if (!nvmeq->qid || iod->aborted) {
1283
		dev_warn(dev->ctrl.device,
1284 1285
			 "I/O %d QID %d timeout, reset controller\n",
			 req->tag, nvmeq->qid);
1286
		nvme_dev_disable(dev, false);
1287
		nvme_reset_ctrl(&dev->ctrl);
K
Keith Busch 已提交
1288

1289
		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
1290
		return BLK_EH_DONE;
K
Keith Busch 已提交
1291 1292
	}

1293
	if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
1294
		atomic_inc(&dev->ctrl.abort_limit);
1295
		return BLK_EH_RESET_TIMER;
1296
	}
1297
	iod->aborted = 1;
M
Matias Bjørling 已提交
1298

K
Keith Busch 已提交
1299 1300
	memset(&cmd, 0, sizeof(cmd));
	cmd.abort.opcode = nvme_admin_abort_cmd;
M
Matias Bjørling 已提交
1301
	cmd.abort.cid = req->tag;
K
Keith Busch 已提交
1302 1303
	cmd.abort.sqid = cpu_to_le16(nvmeq->qid);

1304 1305 1306
	dev_warn(nvmeq->dev->ctrl.device,
		"I/O %d QID %d timeout, aborting\n",
		 req->tag, nvmeq->qid);
1307 1308

	abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
1309
			BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
1310 1311 1312 1313 1314 1315 1316 1317
	if (IS_ERR(abort_req)) {
		atomic_inc(&dev->ctrl.abort_limit);
		return BLK_EH_RESET_TIMER;
	}

	abort_req->timeout = ADMIN_TIMEOUT;
	abort_req->end_io_data = NULL;
	blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);
K
Keith Busch 已提交
1318

1319 1320 1321 1322 1323 1324
	/*
	 * The aborted req will be completed on receiving the abort req.
	 * We enable the timer again. If hit twice, it'll cause a device reset,
	 * as the device then is in a faulty state.
	 */
	return BLK_EH_RESET_TIMER;
K
Keith Busch 已提交
1325 1326
}

M
Matias Bjørling 已提交
1327 1328
static void nvme_free_queue(struct nvme_queue *nvmeq)
{
1329
	dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq->q_depth),
1330
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
1331 1332
	if (!nvmeq->sq_cmds)
		return;
1333

1334
	if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
1335
		pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev),
1336 1337
				nvmeq->sq_cmds, SQ_SIZE(nvmeq->q_depth));
	} else {
1338
		dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq->q_depth),
1339
				nvmeq->sq_cmds, nvmeq->sq_dma_addr);
1340
	}
1341 1342
}

1343
static void nvme_free_queues(struct nvme_dev *dev, int lowest)
1344 1345 1346
{
	int i;

1347 1348
	for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
		dev->ctrl.queue_count--;
1349
		nvme_free_queue(&dev->queues[i]);
1350
	}
1351 1352
}

K
Keith Busch 已提交
1353 1354
/**
 * nvme_suspend_queue - put queue into suspended state
1355
 * @nvmeq: queue to suspend
K
Keith Busch 已提交
1356 1357
 */
static int nvme_suspend_queue(struct nvme_queue *nvmeq)
M
Matthew Wilcox 已提交
1358
{
1359
	if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags))
K
Keith Busch 已提交
1360
		return 1;
1361

1362
	/* ensure that nvme_queue_rq() sees NVMEQ_ENABLED cleared */
1363
	mb();
1364

1365
	nvmeq->dev->online_queues--;
1366
	if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
1367
		blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q);
1368 1369
	if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags))
		pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq);
K
Keith Busch 已提交
1370 1371
	return 0;
}
M
Matthew Wilcox 已提交
1372

1373 1374 1375 1376 1377 1378 1379 1380
static void nvme_suspend_io_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->ctrl.queue_count - 1; i > 0; i--)
		nvme_suspend_queue(&dev->queues[i]);
}

1381
static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
K
Keith Busch 已提交
1382
{
1383
	struct nvme_queue *nvmeq = &dev->queues[0];
K
Keith Busch 已提交
1384

1385 1386 1387
	if (shutdown)
		nvme_shutdown_ctrl(&dev->ctrl);
	else
1388
		nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap);
1389

1390
	nvme_poll_irqdisable(nvmeq, -1);
M
Matthew Wilcox 已提交
1391 1392
}

1393 1394 1395 1396
static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
				int entry_size)
{
	int q_depth = dev->q_depth;
1397 1398
	unsigned q_size_aligned = roundup(q_depth * entry_size,
					  dev->ctrl.page_size);
1399 1400

	if (q_size_aligned * nr_io_queues > dev->cmb_size) {
1401
		u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
1402
		mem_per_q = round_down(mem_per_q, dev->ctrl.page_size);
1403
		q_depth = div_u64(mem_per_q, entry_size);
1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419

		/*
		 * Ensure the reduced q_depth is above some threshold where it
		 * would be better to map queues in system memory with the
		 * original depth
		 */
		if (q_depth < 64)
			return -ENOMEM;
	}

	return q_depth;
}

static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
				int qid, int depth)
{
1420 1421 1422 1423 1424 1425
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
		nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(depth));
		nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
						nvmeq->sq_cmds);
1426 1427 1428 1429
		if (nvmeq->sq_dma_addr) {
			set_bit(NVMEQ_SQ_CMB, &nvmeq->flags);
			return 0; 
		}
1430
	}
1431

1432 1433
	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
				&nvmeq->sq_dma_addr, GFP_KERNEL);
1434 1435
	if (!nvmeq->sq_cmds)
		return -ENOMEM;
1436 1437 1438
	return 0;
}

1439
static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
M
Matthew Wilcox 已提交
1440
{
1441
	struct nvme_queue *nvmeq = &dev->queues[qid];
M
Matthew Wilcox 已提交
1442

1443 1444
	if (dev->ctrl.queue_count > qid)
		return 0;
M
Matthew Wilcox 已提交
1445

1446 1447
	nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(depth),
					 &nvmeq->cq_dma_addr, GFP_KERNEL);
M
Matthew Wilcox 已提交
1448 1449 1450
	if (!nvmeq->cqes)
		goto free_nvmeq;

1451
	if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth))
M
Matthew Wilcox 已提交
1452 1453
		goto free_cqdma;

M
Matthew Wilcox 已提交
1454
	nvmeq->dev = dev;
1455
	spin_lock_init(&nvmeq->sq_lock);
1456
	spin_lock_init(&nvmeq->cq_poll_lock);
M
Matthew Wilcox 已提交
1457
	nvmeq->cq_head = 0;
M
Matthew Wilcox 已提交
1458
	nvmeq->cq_phase = 1;
1459
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
M
Matthew Wilcox 已提交
1460
	nvmeq->q_depth = depth;
K
Keith Busch 已提交
1461
	nvmeq->qid = qid;
1462
	dev->ctrl.queue_count++;
1463

1464
	return 0;
M
Matthew Wilcox 已提交
1465 1466

 free_cqdma:
1467
	dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
M
Matthew Wilcox 已提交
1468 1469
							nvmeq->cq_dma_addr);
 free_nvmeq:
1470
	return -ENOMEM;
M
Matthew Wilcox 已提交
1471 1472
}

1473
static int queue_request_irq(struct nvme_queue *nvmeq)
1474
{
1475 1476 1477 1478 1479 1480 1481 1482 1483 1484
	struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
	int nr = nvmeq->dev->ctrl.instance;

	if (use_threaded_interrupts) {
		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check,
				nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
	} else {
		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq,
				NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
	}
1485 1486
}

1487
static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
M
Matthew Wilcox 已提交
1488
{
1489
	struct nvme_dev *dev = nvmeq->dev;
M
Matthew Wilcox 已提交
1490

1491
	nvmeq->sq_tail = 0;
1492
	nvmeq->last_sq_tail = 0;
1493 1494
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
1495
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1496
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
1497
	nvme_dbbuf_init(dev, nvmeq, qid);
K
Keith Busch 已提交
1498
	dev->online_queues++;
1499
	wmb(); /* ensure the first interrupt sees the initialization */
1500 1501
}

J
Jens Axboe 已提交
1502
static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
1503 1504 1505
{
	struct nvme_dev *dev = nvmeq->dev;
	int result;
1506
	u16 vector = 0;
1507

1508 1509
	clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);

1510 1511 1512 1513
	/*
	 * A queue's vector matches the queue identifier unless the controller
	 * has only one vector available.
	 */
J
Jens Axboe 已提交
1514 1515 1516
	if (!polled)
		vector = dev->num_vecs == 1 ? 0 : qid;
	else
1517
		set_bit(NVMEQ_POLLED, &nvmeq->flags);
J
Jens Axboe 已提交
1518

1519
	result = adapter_alloc_cq(dev, qid, nvmeq, vector);
K
Keith Busch 已提交
1520 1521
	if (result)
		return result;
M
Matthew Wilcox 已提交
1522 1523 1524

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
K
Keith Busch 已提交
1525 1526
		return result;
	else if (result)
M
Matthew Wilcox 已提交
1527 1528
		goto release_cq;

1529
	nvmeq->cq_vector = vector;
1530
	nvme_init_queue(nvmeq, qid);
J
Jens Axboe 已提交
1531

1532 1533
	if (!polled) {
		nvmeq->cq_vector = vector;
J
Jens Axboe 已提交
1534 1535 1536 1537
		result = queue_request_irq(nvmeq);
		if (result < 0)
			goto release_sq;
	}
M
Matthew Wilcox 已提交
1538

1539
	set_bit(NVMEQ_ENABLED, &nvmeq->flags);
1540
	return result;
M
Matthew Wilcox 已提交
1541

1542
release_sq:
1543
	dev->online_queues--;
M
Matthew Wilcox 已提交
1544
	adapter_delete_sq(dev, qid);
1545
release_cq:
M
Matthew Wilcox 已提交
1546
	adapter_delete_cq(dev, qid);
1547
	return result;
M
Matthew Wilcox 已提交
1548 1549
}

1550
static const struct blk_mq_ops nvme_mq_admin_ops = {
1551
	.queue_rq	= nvme_queue_rq,
1552
	.complete	= nvme_pci_complete_rq,
M
Matias Bjørling 已提交
1553
	.init_hctx	= nvme_admin_init_hctx,
1554
	.exit_hctx      = nvme_admin_exit_hctx,
1555
	.init_request	= nvme_init_request,
M
Matias Bjørling 已提交
1556 1557 1558
	.timeout	= nvme_timeout,
};

1559
static const struct blk_mq_ops nvme_mq_ops = {
1560 1561 1562 1563 1564 1565 1566 1567
	.queue_rq	= nvme_queue_rq,
	.complete	= nvme_pci_complete_rq,
	.commit_rqs	= nvme_commit_rqs,
	.init_hctx	= nvme_init_hctx,
	.init_request	= nvme_init_request,
	.map_queues	= nvme_pci_map_queues,
	.timeout	= nvme_timeout,
	.poll		= nvme_poll,
1568 1569
};

1570 1571
static void nvme_dev_remove_admin(struct nvme_dev *dev)
{
1572
	if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
1573 1574 1575 1576 1577
		/*
		 * If the controller was reset during removal, it's possible
		 * user requests may be waiting on a stopped queue. Start the
		 * queue to flush these to completion.
		 */
1578
		blk_mq_unquiesce_queue(dev->ctrl.admin_q);
1579
		blk_cleanup_queue(dev->ctrl.admin_q);
1580 1581 1582 1583
		blk_mq_free_tag_set(&dev->admin_tagset);
	}
}

M
Matias Bjørling 已提交
1584 1585
static int nvme_alloc_admin_tags(struct nvme_dev *dev)
{
1586
	if (!dev->ctrl.admin_q) {
M
Matias Bjørling 已提交
1587 1588
		dev->admin_tagset.ops = &nvme_mq_admin_ops;
		dev->admin_tagset.nr_hw_queues = 1;
K
Keith Busch 已提交
1589

K
Keith Busch 已提交
1590
		dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
M
Matias Bjørling 已提交
1591
		dev->admin_tagset.timeout = ADMIN_TIMEOUT;
1592
		dev->admin_tagset.numa_node = dev_to_node(dev->dev);
1593
		dev->admin_tagset.cmd_size = sizeof(struct nvme_iod);
1594
		dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
M
Matias Bjørling 已提交
1595 1596 1597 1598
		dev->admin_tagset.driver_data = dev;

		if (blk_mq_alloc_tag_set(&dev->admin_tagset))
			return -ENOMEM;
1599
		dev->ctrl.admin_tagset = &dev->admin_tagset;
M
Matias Bjørling 已提交
1600

1601 1602
		dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
		if (IS_ERR(dev->ctrl.admin_q)) {
M
Matias Bjørling 已提交
1603 1604 1605
			blk_mq_free_tag_set(&dev->admin_tagset);
			return -ENOMEM;
		}
1606
		if (!blk_get_queue(dev->ctrl.admin_q)) {
1607
			nvme_dev_remove_admin(dev);
1608
			dev->ctrl.admin_q = NULL;
1609 1610
			return -ENODEV;
		}
K
Keith Busch 已提交
1611
	} else
1612
		blk_mq_unquiesce_queue(dev->ctrl.admin_q);
M
Matias Bjørling 已提交
1613 1614 1615 1616

	return 0;
}

1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642
static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
{
	return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride);
}

static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (size <= dev->bar_mapped_size)
		return 0;
	if (size > pci_resource_len(pdev, 0))
		return -ENOMEM;
	if (dev->bar)
		iounmap(dev->bar);
	dev->bar = ioremap(pci_resource_start(pdev, 0), size);
	if (!dev->bar) {
		dev->bar_mapped_size = 0;
		return -ENOMEM;
	}
	dev->bar_mapped_size = size;
	dev->dbs = dev->bar + NVME_REG_DBS;

	return 0;
}

1643
static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
M
Matthew Wilcox 已提交
1644
{
1645
	int result;
M
Matthew Wilcox 已提交
1646 1647 1648
	u32 aqa;
	struct nvme_queue *nvmeq;

1649 1650 1651 1652
	result = nvme_remap_bar(dev, db_bar_size(dev, 0));
	if (result < 0)
		return result;

1653
	dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
1654
				NVME_CAP_NSSRC(dev->ctrl.cap) : 0;
1655

1656 1657 1658
	if (dev->subsystem &&
	    (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO))
		writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS);
1659

1660
	result = nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap);
1661 1662
	if (result < 0)
		return result;
M
Matthew Wilcox 已提交
1663

1664
	result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
1665 1666
	if (result)
		return result;
M
Matthew Wilcox 已提交
1667

1668
	nvmeq = &dev->queues[0];
M
Matthew Wilcox 已提交
1669 1670 1671
	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

1672 1673 1674
	writel(aqa, dev->bar + NVME_REG_AQA);
	lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
	lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);
M
Matthew Wilcox 已提交
1675

1676
	result = nvme_enable_ctrl(&dev->ctrl, dev->ctrl.cap);
1677
	if (result)
K
Keith Busch 已提交
1678
		return result;
M
Matias Bjørling 已提交
1679

K
Keith Busch 已提交
1680
	nvmeq->cq_vector = 0;
1681
	nvme_init_queue(nvmeq, 0);
1682
	result = queue_request_irq(nvmeq);
1683
	if (result) {
1684
		dev->online_queues--;
K
Keith Busch 已提交
1685
		return result;
1686
	}
1687

1688
	set_bit(NVMEQ_ENABLED, &nvmeq->flags);
M
Matthew Wilcox 已提交
1689 1690 1691
	return result;
}

static int nvme_create_io_queues(struct nvme_dev *dev)
{
	unsigned i, max, rw_queues;
	int ret = 0;

	for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
		if (nvme_alloc_queue(dev, i, dev->q_depth)) {
			ret = -ENOMEM;
			break;
		}
	}

	max = min(dev->max_qid, dev->ctrl.queue_count - 1);
	if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) {
		rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] +
				dev->io_queues[HCTX_TYPE_READ];
	} else {
		rw_queues = max;
	}

	for (i = dev->online_queues; i <= max; i++) {
		bool polled = i > rw_queues;

		ret = nvme_create_queue(&dev->queues[i], i, polled);
		if (ret)
			break;
	}

	/*
	 * Ignore failing Create SQ/CQ commands, we can continue with less
	 * than the desired amount of queues, and even a controller without
	 * I/O queues can still be used to issue admin commands.  This might
	 * be useful to upgrade a buggy firmware for example.
	 */
	return ret >= 0 ? 0 : ret;
}

static ssize_t nvme_cmb_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));

	return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz  : x%08x\n",
		       ndev->cmbloc, ndev->cmbsz);
}
static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);

static u64 nvme_cmb_size_unit(struct nvme_dev *dev)
{
	u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;

	return 1ULL << (12 + 4 * szu);
}

static u32 nvme_cmb_size(struct nvme_dev *dev)
{
	return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK;
}

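/*
 * Illustrative CMBSZ decoding (values are made up): SZU == 2 selects a
 * 1 MiB size unit (1 << (12 + 4 * 2)), so SZ == 16 advertises a 16 MiB
 * controller memory buffer; the offset in CMBLOC is expressed in the same
 * size units within the BAR named by CMBLOC.BIR.
 */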
static void nvme_map_cmb(struct nvme_dev *dev)
{
	u64 size, offset;
	resource_size_t bar_size;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	int bar;

	if (dev->cmb_size)
		return;

	dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
	if (!dev->cmbsz)
		return;
	dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);

	size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev);
	offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc);
	bar = NVME_CMB_BIR(dev->cmbloc);
	bar_size = pci_resource_len(pdev, bar);

	if (offset > bar_size)
		return;

	/*
	 * Controllers may support a CMB size larger than their BAR,
	 * for example, due to being behind a bridge. Reduce the CMB to
	 * the reported size of the BAR
	 */
	if (size > bar_size - offset)
		size = bar_size - offset;

	if (pci_p2pdma_add_resource(pdev, bar, size, offset)) {
		dev_warn(dev->ctrl.device,
			 "failed to register the CMB\n");
		return;
	}

	dev->cmb_size = size;
	dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS);

	if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) ==
			(NVME_CMBSZ_WDS | NVME_CMBSZ_RDS))
		pci_p2pmem_publish(pdev, true);

	if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
				    &dev_attr_cmb.attr, NULL))
		dev_warn(dev->ctrl.device,
			 "failed to add sysfs attribute for CMB\n");
}

static inline void nvme_release_cmb(struct nvme_dev *dev)
{
	if (dev->cmb_size) {
		sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
					     &dev_attr_cmb.attr, NULL);
		dev->cmb_size = 0;
	}
}

static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
{
	u64 dma_addr = dev->host_mem_descs_dma;
	struct nvme_command c;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode	= nvme_admin_set_features;
	c.features.fid		= cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
	c.features.dword11	= cpu_to_le32(bits);
	c.features.dword12	= cpu_to_le32(dev->host_mem_size >>
					      ilog2(dev->ctrl.page_size));
	c.features.dword13	= cpu_to_le32(lower_32_bits(dma_addr));
	c.features.dword14	= cpu_to_le32(upper_32_bits(dma_addr));
	c.features.dword15	= cpu_to_le32(dev->nr_host_mem_descs);

	ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
	if (ret) {
		dev_warn(dev->ctrl.device,
			 "failed to set host mem (err %d, flags %#x).\n",
			 ret, bits);
	}
	return ret;
}

static void nvme_free_host_mem(struct nvme_dev *dev)
{
	int i;

	for (i = 0; i < dev->nr_host_mem_descs; i++) {
		struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
		size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;

		dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
			       le64_to_cpu(desc->addr),
			       DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
	}

	kfree(dev->host_mem_desc_bufs);
	dev->host_mem_desc_bufs = NULL;
	dma_free_coherent(dev->dev,
			dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
			dev->host_mem_descs, dev->host_mem_descs_dma);
	dev->host_mem_descs = NULL;
	dev->nr_host_mem_descs = 0;
}

static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
		u32 chunk_size)
{
	struct nvme_host_mem_buf_desc *descs;
	u32 max_entries, len;
	dma_addr_t descs_dma;
	int i = 0;
	void **bufs;
	u64 size, tmp;

	tmp = (preferred + chunk_size - 1);
	do_div(tmp, chunk_size);
	max_entries = tmp;

	if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
		max_entries = dev->ctrl.hmmaxd;

	descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs),
				   &descs_dma, GFP_KERNEL);
	if (!descs)
		goto out;

	bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL);
	if (!bufs)
		goto out_free_descs;

	for (size = 0; size < preferred && i < max_entries; size += len) {
		dma_addr_t dma_addr;

		len = min_t(u64, chunk_size, preferred - size);
		bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL,
				DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
		if (!bufs[i])
			break;

		descs[i].addr = cpu_to_le64(dma_addr);
		descs[i].size = cpu_to_le32(len / dev->ctrl.page_size);
		i++;
	}

	if (!size)
		goto out_free_bufs;

	dev->nr_host_mem_descs = i;
	dev->host_mem_size = size;
	dev->host_mem_descs = descs;
	dev->host_mem_descs_dma = descs_dma;
	dev->host_mem_desc_bufs = bufs;
	return 0;

out_free_bufs:
	while (--i >= 0) {
		size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;

		dma_free_attrs(dev->dev, size, bufs[i],
			       le64_to_cpu(descs[i].addr),
			       DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
	}

	kfree(bufs);
out_free_descs:
	dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
			descs_dma);
out:
	dev->host_mem_descs = NULL;
	return -ENOMEM;
}
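
/*
 * Chunking strategy used below (numbers are illustrative): for a 256 MiB
 * preferred HMB and a 4 MiB starting chunk the first attempt needs 64
 * descriptors; every failed attempt halves the chunk size until either the
 * allocation succeeds or the chunk drops below the controller's minimum
 * descriptor size (hmminds) or two pages.
 */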

static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
{
	u32 chunk_size;

	/* start big and work our way down */
	for (chunk_size = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
	     chunk_size >= max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
	     chunk_size /= 2) {
		if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) {
			if (!min || dev->host_mem_size >= min)
				return 0;
			nvme_free_host_mem(dev);
		}
	}

	return -ENOMEM;
}

static int nvme_setup_host_mem(struct nvme_dev *dev)
{
	u64 max = (u64)max_host_mem_size_mb * SZ_1M;
	u64 preferred = (u64)dev->ctrl.hmpre * 4096;
	u64 min = (u64)dev->ctrl.hmmin * 4096;
	u32 enable_bits = NVME_HOST_MEM_ENABLE;
	int ret;

	preferred = min(preferred, max);
	if (min > max) {
		dev_warn(dev->ctrl.device,
			"min host memory (%lld MiB) above limit (%d MiB).\n",
			min >> ilog2(SZ_1M), max_host_mem_size_mb);
		nvme_free_host_mem(dev);
		return 0;
	}

	/*
	 * If we already have a buffer allocated check if we can reuse it.
	 */
	if (dev->host_mem_descs) {
		if (dev->host_mem_size >= min)
			enable_bits |= NVME_HOST_MEM_RETURN;
		else
			nvme_free_host_mem(dev);
	}

	if (!dev->host_mem_descs) {
		if (nvme_alloc_host_mem(dev, min, preferred)) {
			dev_warn(dev->ctrl.device,
				"failed to allocate host memory buffer.\n");
			return 0; /* controller must work without HMB */
		}

		dev_info(dev->ctrl.device,
			"allocated %lld MiB host memory buffer.\n",
			dev->host_mem_size >> ilog2(SZ_1M));
	}

	ret = nvme_set_host_mem(dev, enable_bits);
	if (ret)
		nvme_free_host_mem(dev);
	return ret;
}

/*
 * nirqs is the number of interrupts available for write and read
 * queues. The core already reserved an interrupt for the admin queue.
 */
static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
{
	struct nvme_dev *dev = affd->priv;
	unsigned int nr_read_queues;

	/*
	 * If there is no interrupt available for queues, ensure that
	 * the default queue is set to 1. The affinity set size is
	 * also set to one, but the irq core ignores it for this case.
	 *
	 * If only one interrupt is available or 'write_queues' == 0, combine
	 * write and read queues.
	 *
	 * If 'write_queues' > 0, ensure it leaves room for at least one read
	 * queue.
	 */
	if (!nrirqs) {
		nrirqs = 1;
		nr_read_queues = 0;
	} else if (nrirqs == 1 || !write_queues) {
		nr_read_queues = 0;
	} else if (write_queues >= nrirqs) {
		nr_read_queues = 1;
	} else {
		nr_read_queues = nrirqs - write_queues;
	}

	dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
	affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
	dev->io_queues[HCTX_TYPE_READ] = nr_read_queues;
	affd->set_size[HCTX_TYPE_READ] = nr_read_queues;
	affd->nr_sets = nr_read_queues ? 2 : 1;
}
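
/*
 * Example for nvme_calc_irq_sets() above (illustrative numbers): with 8
 * I/O interrupt vectors and write_queues == 2, the default (write) set
 * gets 2 vectors and the read set gets 6; with write_queues == 0 all 8
 * vectors land in the default set and only one affinity set is used.
 */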

static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct irq_affinity affd = {
		.pre_vectors	= 1,
		.calc_sets	= nvme_calc_irq_sets,
		.priv		= dev,
	};
	unsigned int irq_queues, this_p_queues;

	/*
	 * Poll queues don't need interrupts, but we need at least one IO
	 * queue left over for non-polled IO.
	 */
	this_p_queues = poll_queues;
	if (this_p_queues >= nr_io_queues) {
		this_p_queues = nr_io_queues - 1;
		irq_queues = 1;
	} else {
		irq_queues = nr_io_queues - this_p_queues + 1;
	}
	dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;

	/* Initialize for the single interrupt case */
	dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
	dev->io_queues[HCTX_TYPE_READ] = 0;

	return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues,
			      PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
}

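/*
 * Delete all I/O SQs first and only then the CQs: a completion queue may
 * only be deleted once every submission queue mapped to it is gone. If the
 * SQ deletions time out (__nvme_disable_io_queues() returns false), the CQ
 * pass is skipped.
 */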
static void nvme_disable_io_queues(struct nvme_dev *dev)
{
	if (__nvme_disable_io_queues(dev, nvme_admin_delete_sq))
		__nvme_disable_io_queues(dev, nvme_admin_delete_cq);
}

static int nvme_setup_io_queues(struct nvme_dev *dev)
{
	struct nvme_queue *adminq = &dev->queues[0];
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	int result, nr_io_queues;
	unsigned long size;

	nr_io_queues = max_io_queues();
	result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
	if (result < 0)
		return result;

	if (nr_io_queues == 0)
		return 0;

	clear_bit(NVMEQ_ENABLED, &adminq->flags);

	if (dev->cmb_use_sqes) {
		result = nvme_cmb_qdepth(dev, nr_io_queues,
				sizeof(struct nvme_command));
		if (result > 0)
			dev->q_depth = result;
		else
			dev->cmb_use_sqes = false;
	}

	do {
		size = db_bar_size(dev, nr_io_queues);
		result = nvme_remap_bar(dev, size);
		if (!result)
			break;
		if (!--nr_io_queues)
			return -ENOMEM;
	} while (1);
	adminq->q_db = dev->dbs;

 retry:
	/* Deregister the admin queue's interrupt */
	pci_free_irq(pdev, 0, adminq);

	/*
	 * If we enable msix early due to not intx, disable it again before
	 * setting up the full range we need.
	 */
	pci_free_irq_vectors(pdev);

	result = nvme_setup_irqs(dev, nr_io_queues);
	if (result <= 0)
		return -EIO;

	dev->num_vecs = result;
	result = max(result - 1, 1);
	dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL];

	/*
	 * Should investigate if there's a performance win from allocating
	 * more queues than interrupt vectors; it might allow the submission
	 * path to scale better, even if the receive path is limited by the
	 * number of interrupts.
	 */
	result = queue_request_irq(adminq);
	if (result)
		return result;
	set_bit(NVMEQ_ENABLED, &adminq->flags);

	result = nvme_create_io_queues(dev);
	if (result || dev->online_queues < 2)
		return result;

	if (dev->online_queues - 1 < dev->max_qid) {
		nr_io_queues = dev->online_queues - 1;
		nvme_disable_io_queues(dev);
		nvme_suspend_io_queues(dev);
		goto retry;
	}
	dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n",
					dev->io_queues[HCTX_TYPE_DEFAULT],
					dev->io_queues[HCTX_TYPE_READ],
					dev->io_queues[HCTX_TYPE_POLL]);
	return 0;
}

static void nvme_del_queue_end(struct request *req, blk_status_t error)
{
	struct nvme_queue *nvmeq = req->end_io_data;

	blk_mq_free_request(req);
	complete(&nvmeq->delete_done);
}

static void nvme_del_cq_end(struct request *req, blk_status_t error)
{
	struct nvme_queue *nvmeq = req->end_io_data;

	if (error)
		set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);

	nvme_del_queue_end(req, error);
}

static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
{
	struct request_queue *q = nvmeq->dev->ctrl.admin_q;
	struct request *req;
	struct nvme_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.delete_queue.opcode = opcode;
	cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);

	req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = ADMIN_TIMEOUT;
	req->end_io_data = nvmeq;

	init_completion(&nvmeq->delete_done);
	blk_execute_rq_nowait(q, NULL, req, false,
			opcode == nvme_admin_delete_cq ?
				nvme_del_cq_end : nvme_del_queue_end);
	return 0;
}

static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
{
	int nr_queues = dev->online_queues - 1, sent = 0;
	unsigned long timeout;

 retry:
	timeout = ADMIN_TIMEOUT;
	while (nr_queues > 0) {
		if (nvme_delete_queue(&dev->queues[nr_queues], opcode))
			break;
		nr_queues--;
		sent++;
	}
	while (sent) {
		struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent];

		timeout = wait_for_completion_io_timeout(&nvmeq->delete_done,
				timeout);
		if (timeout == 0)
			return false;

		/* handle any remaining CQEs */
		if (opcode == nvme_admin_delete_cq &&
		    !test_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags))
			nvme_poll_irqdisable(nvmeq, -1);

		sent--;
		if (nr_queues)
			goto retry;
	}
	return true;
}
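
/*
 * Note for nvme_dev_add() below: the tag set depth is q_depth - 1 because
 * an NVMe queue with N slots can only hold N - 1 commands at once (a
 * completely full ring would be indistinguishable from an empty one).
 */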

/*
 * return error value only when tagset allocation failed
 */
static int nvme_dev_add(struct nvme_dev *dev)
{
	int ret;

	if (!dev->ctrl.tagset) {
		dev->tagset.ops = &nvme_mq_ops;
		dev->tagset.nr_hw_queues = dev->online_queues - 1;
		dev->tagset.nr_maps = 2; /* default + read */
		if (dev->io_queues[HCTX_TYPE_POLL])
			dev->tagset.nr_maps++;
		dev->tagset.timeout = NVME_IO_TIMEOUT;
		dev->tagset.numa_node = dev_to_node(dev->dev);
		dev->tagset.queue_depth =
				min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
		dev->tagset.cmd_size = sizeof(struct nvme_iod);
		dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
		dev->tagset.driver_data = dev;

		ret = blk_mq_alloc_tag_set(&dev->tagset);
		if (ret) {
			dev_warn(dev->ctrl.device,
				"IO queues tagset allocation failed %d\n", ret);
			return ret;
		}
		dev->ctrl.tagset = &dev->tagset;

		nvme_dbbuf_set(dev);
	} else {
		blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);

		/* Free previously allocated queues that are no longer usable */
		nvme_free_queues(dev, dev->online_queues);
	}

	return 0;
}
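
/*
 * Note for nvme_pci_enable() below: CAP.MQES is zero-based, so a controller
 * reporting MQES == 1023 supports 1024 entries per queue, hence the + 1;
 * the result is further capped by the io_queue_depth module parameter.
 */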

static int nvme_pci_enable(struct nvme_dev *dev)
{
	int result = -ENOMEM;
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pci_enable_device_mem(pdev))
		return result;

	pci_set_master(pdev);

	if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32)))
		goto disable;

	if (readl(dev->bar + NVME_REG_CSTS) == -1) {
		result = -ENODEV;
		goto disable;
	}

	/*
	 * Some devices and/or platforms don't advertise or work with INTx
	 * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
	 * adjust this later.
	 */
	result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (result < 0)
		return result;

	dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);

	dev->q_depth = min_t(int, NVME_CAP_MQES(dev->ctrl.cap) + 1,
				io_queue_depth);
	dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
	dev->dbs = dev->bar + 4096;

	/*
	 * Temporary fix for the Apple controller found in the MacBook8,1 and
	 * some MacBook7,1 to avoid controller resets and data loss.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
		dev->q_depth = 2;
		dev_warn(dev->ctrl.device, "detected Apple NVMe controller, "
			"set queue depth=%u to work around controller resets\n",
			dev->q_depth);
	} else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG &&
		   (pdev->device == 0xa821 || pdev->device == 0xa822) &&
		   NVME_CAP_MQES(dev->ctrl.cap) == 0) {
		dev->q_depth = 64;
		dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, "
			"set queue depth=%u\n", dev->q_depth);
	}

	nvme_map_cmb(dev);

	pci_enable_pcie_error_reporting(pdev);
	pci_save_state(pdev);
	return 0;

 disable:
	pci_disable_device(pdev);
	return result;
}

static void nvme_dev_unmap(struct nvme_dev *dev)
{
	if (dev->bar)
		iounmap(dev->bar);
	pci_release_mem_regions(to_pci_dev(dev->dev));
}

static void nvme_pci_disable(struct nvme_dev *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	pci_free_irq_vectors(pdev);

	if (pci_is_enabled(pdev)) {
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
	}
}

static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
{
	bool dead = true;
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	mutex_lock(&dev->shutdown_lock);
	if (pci_is_enabled(pdev)) {
		u32 csts = readl(dev->bar + NVME_REG_CSTS);

		if (dev->ctrl.state == NVME_CTRL_LIVE ||
		    dev->ctrl.state == NVME_CTRL_RESETTING)
			nvme_start_freeze(&dev->ctrl);
		dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) ||
			pdev->error_state != pci_channel_io_normal);
	}

	/*
	 * Give the controller a chance to complete all entered requests if
	 * doing a safe shutdown.
	 */
	if (!dead) {
		if (shutdown)
			nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);
	}

	nvme_stop_queues(&dev->ctrl);

	if (!dead && dev->ctrl.queue_count > 0) {
		nvme_disable_io_queues(dev);
		nvme_disable_admin_queue(dev, shutdown);
	}
	nvme_suspend_io_queues(dev);
	nvme_suspend_queue(&dev->queues[0]);
	nvme_pci_disable(dev);

	blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
	blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);

	/*
	 * The driver will not be starting up queues again if shutting down so
	 * must flush all entered requests to their failed completion to avoid
	 * deadlocking blk-mq hot-cpu notifier.
	 */
	if (shutdown)
		nvme_start_queues(&dev->ctrl);
	mutex_unlock(&dev->shutdown_lock);
}

static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
	dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
						PAGE_SIZE, PAGE_SIZE, 0);
	if (!dev->prp_page_pool)
		return -ENOMEM;

	/* Optimisation for I/Os between 4k and 128k */
	dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
						256, 256, 0);
	if (!dev->prp_small_pool) {
		dma_pool_destroy(dev->prp_page_pool);
		return -ENOMEM;
	}
	return 0;
}

static void nvme_release_prp_pools(struct nvme_dev *dev)
{
	dma_pool_destroy(dev->prp_page_pool);
	dma_pool_destroy(dev->prp_small_pool);
}

static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
{
	struct nvme_dev *dev = to_nvme_dev(ctrl);

	nvme_dbbuf_dma_free(dev);
	put_device(dev->dev);
	if (dev->tagset.tags)
		blk_mq_free_tag_set(&dev->tagset);
	if (dev->ctrl.admin_q)
		blk_put_queue(dev->ctrl.admin_q);
	kfree(dev->queues);
	free_opal_dev(dev->ctrl.opal_dev);
	mempool_destroy(dev->iod_mempool);
	kfree(dev);
}

static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
{
	dev_warn(dev->ctrl.device, "Removing after probe failure status: %d\n", status);

	nvme_get_ctrl(&dev->ctrl);
	nvme_dev_disable(dev, false);
	nvme_kill_queues(&dev->ctrl);
	if (!queue_work(nvme_wq, &dev->remove_work))
		nvme_put_ctrl(&dev->ctrl);
}
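
/*
 * Controller (re)initialization: shut down a live controller, re-enable the
 * PCI device, bring up the admin queue and admin tag set, identify the
 * controller, set up the optional host memory buffer and the I/O queues,
 * and finally move the controller to LIVE (or ADMIN_ONLY if no I/O queues
 * could be created).
 */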

static void nvme_reset_work(struct work_struct *work)
{
	struct nvme_dev *dev =
		container_of(work, struct nvme_dev, ctrl.reset_work);
	bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
	int result = -ENODEV;
	enum nvme_ctrl_state new_state = NVME_CTRL_LIVE;

	if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
		goto out;

	/*
	 * If we're called to reset a live controller first shut it down before
	 * moving on.
	 */
	if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
		nvme_dev_disable(dev, false);

	mutex_lock(&dev->shutdown_lock);
	result = nvme_pci_enable(dev);
	if (result)
		goto out_unlock;

	result = nvme_pci_configure_admin_queue(dev);
	if (result)
		goto out_unlock;

	result = nvme_alloc_admin_tags(dev);
	if (result)
		goto out_unlock;

	/*
	 * Limit the max command size to prevent iod->sg allocations going
	 * over a single page.
	 */
	dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
	dev->ctrl.max_segments = NVME_MAX_SEGS;
	mutex_unlock(&dev->shutdown_lock);

	/*
	 * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
	 * initializing procedure here.
	 */
	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
		dev_warn(dev->ctrl.device,
			"failed to mark controller CONNECTING\n");
		goto out;
	}

	result = nvme_init_identify(&dev->ctrl);
	if (result)
		goto out;

	if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) {
		if (!dev->ctrl.opal_dev)
			dev->ctrl.opal_dev =
				init_opal_dev(&dev->ctrl, &nvme_sec_submit);
		else if (was_suspend)
			opal_unlock_from_suspend(dev->ctrl.opal_dev);
	} else {
		free_opal_dev(dev->ctrl.opal_dev);
		dev->ctrl.opal_dev = NULL;
	}

	if (dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP) {
		result = nvme_dbbuf_dma_alloc(dev);
		if (result)
			dev_warn(dev->dev,
				 "unable to allocate dma for dbbuf\n");
	}

	if (dev->ctrl.hmpre) {
		result = nvme_setup_host_mem(dev);
		if (result < 0)
			goto out;
	}

	result = nvme_setup_io_queues(dev);
	if (result)
		goto out;

	/*
	 * Keep the controller around but remove all namespaces if we don't have
	 * any working I/O queue.
	 */
	if (dev->online_queues < 2) {
		dev_warn(dev->ctrl.device, "IO queues not created\n");
		nvme_kill_queues(&dev->ctrl);
		nvme_remove_namespaces(&dev->ctrl);
		new_state = NVME_CTRL_ADMIN_ONLY;
	} else {
		nvme_start_queues(&dev->ctrl);
		nvme_wait_freeze(&dev->ctrl);
		/* hit this only when allocate tagset fails */
		if (nvme_dev_add(dev))
			new_state = NVME_CTRL_ADMIN_ONLY;
		nvme_unfreeze(&dev->ctrl);
	}

	/*
	 * If only admin queue live, keep it to do further investigation or
	 * recovery.
	 */
	if (!nvme_change_ctrl_state(&dev->ctrl, new_state)) {
		dev_warn(dev->ctrl.device,
			"failed to mark controller state %d\n", new_state);
		goto out;
	}

	nvme_start_ctrl(&dev->ctrl);
	return;

 out_unlock:
	mutex_unlock(&dev->shutdown_lock);
 out:
	nvme_remove_dead_ctrl(dev, result);
}

static void nvme_remove_dead_ctrl_work(struct work_struct *work)
{
	struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pci_get_drvdata(pdev))
		device_release_driver(&pdev->dev);
	nvme_put_ctrl(&dev->ctrl);
}

static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
	*val = readl(to_nvme_dev(ctrl)->bar + off);
	return 0;
}

static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{
	writel(val, to_nvme_dev(ctrl)->bar + off);
	return 0;
}

static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
	*val = readq(to_nvme_dev(ctrl)->bar + off);
	return 0;
}

static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{
	struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);

	return snprintf(buf, size, "%s", dev_name(&pdev->dev));
}

static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
	.name			= "pcie",
	.module			= THIS_MODULE,
	.flags			= NVME_F_METADATA_SUPPORTED |
				  NVME_F_PCI_P2PDMA,
	.reg_read32		= nvme_pci_reg_read32,
	.reg_write32		= nvme_pci_reg_write32,
	.reg_read64		= nvme_pci_reg_read64,
	.free_ctrl		= nvme_pci_free_ctrl,
	.submit_async_event	= nvme_pci_submit_async_event,
	.get_address		= nvme_pci_get_address,
};

static int nvme_dev_map(struct nvme_dev *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pci_request_mem_regions(pdev, "nvme"))
		return -ENODEV;

	if (nvme_remap_bar(dev, NVME_REG_DBS + 4096))
		goto release;

	return 0;
  release:
	pci_release_mem_regions(pdev);
	return -ENODEV;
}

static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
{
	if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
		/*
		 * Several Samsung devices seem to drop off the PCIe bus
		 * randomly when APST is on and uses the deepest sleep state.
		 * This has been observed on a Samsung "SM951 NVMe SAMSUNG
		 * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD
		 * 950 PRO 256GB", but it seems to be restricted to two Dell
		 * laptops.
		 */
		if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") &&
		    (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
		     dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
			return NVME_QUIRK_NO_DEEPEST_PS;
	} else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
		/*
		 * Samsung SSD 960 EVO drops off the PCIe bus after system
		 * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as
		 * within few minutes after bootup on a Coffee Lake board -
		 * ASUS PRIME Z370-A
		 */
		if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
		    (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") ||
		     dmi_match(DMI_BOARD_NAME, "PRIME Z370-A")))
			return NVME_QUIRK_NO_APST;
	}

	return 0;
}

static void nvme_async_probe(void *data, async_cookie_t cookie)
{
	struct nvme_dev *dev = data;

	nvme_reset_ctrl_sync(&dev->ctrl);
	flush_work(&dev->ctrl.scan_work);
	nvme_put_ctrl(&dev->ctrl);
}

static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int node, result = -ENOMEM;
	struct nvme_dev *dev;
	unsigned long quirks = id->driver_data;
	size_t alloc_size;

	node = dev_to_node(&pdev->dev);
	if (node == NUMA_NO_NODE)
		set_dev_node(&pdev->dev, first_memory_node);

	dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
	if (!dev)
		return -ENOMEM;

	dev->queues = kcalloc_node(max_queue_count(), sizeof(struct nvme_queue),
					GFP_KERNEL, node);
	if (!dev->queues)
		goto free;

	dev->dev = get_device(&pdev->dev);
	pci_set_drvdata(pdev, dev);

	result = nvme_dev_map(dev);
	if (result)
		goto put_pci;

	INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
	INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
	mutex_init(&dev->shutdown_lock);

	result = nvme_setup_prp_pools(dev);
	if (result)
		goto unmap;

	quirks |= check_vendor_combination_bug(pdev);

	/*
	 * Double check that our mempool alloc size will cover the biggest
	 * command we support.
	 */
	alloc_size = nvme_pci_iod_alloc_size(dev, NVME_MAX_KB_SZ,
						NVME_MAX_SEGS, true);
	WARN_ON_ONCE(alloc_size > PAGE_SIZE);

	dev->iod_mempool = mempool_create_node(1, mempool_kmalloc,
						mempool_kfree,
						(void *) alloc_size,
						GFP_KERNEL, node);
	if (!dev->iod_mempool) {
		result = -ENOMEM;
		goto release_pools;
	}

	result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
			quirks);
	if (result)
		goto release_mempool;

	dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));

	nvme_get_ctrl(&dev->ctrl);
	async_schedule(nvme_async_probe, dev);

	return 0;

 release_mempool:
	mempool_destroy(dev->iod_mempool);
 release_pools:
	nvme_release_prp_pools(dev);
 unmap:
	nvme_dev_unmap(dev);
 put_pci:
	put_device(dev->dev);
 free:
	kfree(dev->queues);
	kfree(dev);
	return result;
}

static void nvme_reset_prepare(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	nvme_dev_disable(dev, false);
}

static void nvme_reset_done(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	nvme_reset_ctrl_sync(&dev->ctrl);
}

static void nvme_shutdown(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	nvme_dev_disable(dev, true);
}

/*
 * The driver's remove may be called on a device in a partially initialized
 * state. This function must not have any dependencies on the device state in
 * order to proceed.
 */
static void nvme_remove(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
	pci_set_drvdata(pdev, NULL);

	if (!pci_device_is_present(pdev)) {
		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
		nvme_dev_disable(dev, true);
		nvme_dev_remove_admin(dev);
	}

	flush_work(&dev->ctrl.reset_work);
	nvme_stop_ctrl(&dev->ctrl);
	nvme_remove_namespaces(&dev->ctrl);
	nvme_dev_disable(dev, true);
	nvme_release_cmb(dev);
	nvme_free_host_mem(dev);
	nvme_dev_remove_admin(dev);
	nvme_free_queues(dev, 0);
	nvme_uninit_ctrl(&dev->ctrl);
	nvme_release_prp_pools(dev);
	nvme_dev_unmap(dev);
	nvme_put_ctrl(&dev->ctrl);
}
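
/*
 * Power management: suspend shuts the controller down through
 * nvme_dev_disable(ndev, true) (the safe-shutdown path); resume schedules a
 * controller reset, which re-runs nvme_reset_work() to bring it back up.
 */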

#ifdef CONFIG_PM_SLEEP
static int nvme_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct nvme_dev *ndev = pci_get_drvdata(pdev);

	nvme_dev_disable(ndev, true);
	return 0;
}

static int nvme_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct nvme_dev *ndev = pci_get_drvdata(pdev);

	nvme_reset_ctrl(&ndev->ctrl);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);

static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	/*
	 * A frozen channel requires a reset. When detected, this method will
	 * shutdown the controller to quiesce. The controller will be restarted
	 * after the slot reset through driver's slot_reset callback.
	 */
	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		dev_warn(dev->ctrl.device,
			"frozen state error detected, reset controller\n");
		nvme_dev_disable(dev, false);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_warn(dev->ctrl.device,
			"failure state error detected, request disconnect\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	dev_info(dev->ctrl.device, "restart after slot reset\n");
	pci_restore_state(pdev);
	nvme_reset_ctrl(&dev->ctrl);
	return PCI_ERS_RESULT_RECOVERED;
}

static void nvme_error_resume(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	flush_work(&dev->ctrl.reset_work);
}

static const struct pci_error_handlers nvme_err_handler = {
	.error_detected	= nvme_error_detected,
	.slot_reset	= nvme_slot_reset,
	.resume		= nvme_error_resume,
	.reset_prepare	= nvme_reset_prepare,
	.reset_done	= nvme_reset_done,
};

static const struct pci_device_id nvme_id_table[] = {
	{ PCI_VDEVICE(INTEL, 0x0953),
		.driver_data = NVME_QUIRK_STRIPE_SIZE |
				NVME_QUIRK_DEALLOCATE_ZEROES, },
	{ PCI_VDEVICE(INTEL, 0x0a53),
		.driver_data = NVME_QUIRK_STRIPE_SIZE |
				NVME_QUIRK_DEALLOCATE_ZEROES, },
	{ PCI_VDEVICE(INTEL, 0x0a54),
		.driver_data = NVME_QUIRK_STRIPE_SIZE |
				NVME_QUIRK_DEALLOCATE_ZEROES, },
	{ PCI_VDEVICE(INTEL, 0x0a55),
		.driver_data = NVME_QUIRK_STRIPE_SIZE |
				NVME_QUIRK_DEALLOCATE_ZEROES, },
	{ PCI_VDEVICE(INTEL, 0xf1a5),	/* Intel 600P/P3100 */
		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
				NVME_QUIRK_MEDIUM_PRIO_SQ },
	{ PCI_VDEVICE(INTEL, 0xf1a6),	/* Intel 760p/Pro 7600p */
		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
	{ PCI_VDEVICE(INTEL, 0x5845),	/* Qemu emulated controller */
		.driver_data = NVME_QUIRK_IDENTIFY_CNS |
				NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x1bb1, 0x0100),   /* Seagate Nytro Flash Storage */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x1c58, 0x0003),	/* HGST adapter */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x1c58, 0x0023),	/* WDC SN200 adapter */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x1c5f, 0x0540),	/* Memblaze Pblaze4 adapter */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x144d, 0xa821),   /* Samsung PM1725 */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x144d, 0xa822),   /* Samsung PM1725a */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x1d1d, 0x1f1f),	/* LightNVM qemu device */
		.driver_data = NVME_QUIRK_LIGHTNVM, },
	{ PCI_DEVICE(0x1d1d, 0x2807),	/* CNEX WL */
		.driver_data = NVME_QUIRK_LIGHTNVM, },
	{ PCI_DEVICE(0x1d1d, 0x2601),	/* CNEX Granby */
		.driver_data = NVME_QUIRK_LIGHTNVM, },
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);

static struct pci_driver nvme_driver = {
	.name		= "nvme",
	.id_table	= nvme_id_table,
	.probe		= nvme_probe,
	.remove		= nvme_remove,
	.shutdown	= nvme_shutdown,
	.driver		= {
		.pm	= &nvme_dev_pm_ops,
	},
	.sriov_configure = pci_sriov_configure_simple,
	.err_handler	= &nvme_err_handler,
};

static int __init nvme_init(void)
{
	BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
	return pci_register_driver(&nvme_driver);
}

static void __exit nvme_exit(void)
{
	pci_unregister_driver(&nvme_driver);
	flush_workqueue(nvme_wq);
	_nvme_check_size();
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_init);
module_exit(nvme_exit);