nvme-core.c 71.4 KB
Newer Older
M
Matthew Wilcox 已提交
1 2
/*
 * NVM Express device driver
3
 * Copyright (c) 2011-2014, Intel Corporation.
M
Matthew Wilcox 已提交
4 5 6 7 8 9 10 11 12 13 14 15
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/nvme.h>
16
#include <linux/bitops.h>
M
Matthew Wilcox 已提交
17
#include <linux/blkdev.h>
M
Matias Bjørling 已提交
18
#include <linux/blk-mq.h>
K
Keith Busch 已提交
19
#include <linux/cpu.h>
20
#include <linux/delay.h>
M
Matthew Wilcox 已提交
21 22 23
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
K
Keith Busch 已提交
24
#include <linux/hdreg.h>
25
#include <linux/idr.h>
M
Matthew Wilcox 已提交
26 27 28 29
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
30
#include <linux/kthread.h>
M
Matthew Wilcox 已提交
31 32 33 34 35
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
36
#include <linux/poison.h>
37
#include <linux/ptrace.h>
M
Matthew Wilcox 已提交
38 39 40
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
V
Vishal Verma 已提交
41
#include <scsi/sg.h>
42 43
#include <asm-generic/io-64-nonatomic-lo-hi.h>

44
#define NVME_Q_DEPTH		1024
M
Matias Bjørling 已提交
45
#define NVME_AQ_DEPTH		64
M
Matthew Wilcox 已提交
46 47
#define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
48
#define ADMIN_TIMEOUT		(admin_timeout * HZ)
49
#define SHUTDOWN_TIMEOUT	(shutdown_timeout * HZ)
50 51 52 53 54
#define IOD_TIMEOUT		(retry_time * HZ)

static unsigned char admin_timeout = 60;
module_param(admin_timeout, byte, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
M
Matthew Wilcox 已提交
55

56 57
unsigned char nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
58
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
M
Matthew Wilcox 已提交
59

60 61 62 63
static unsigned char retry_time = 30;
module_param(retry_time, byte, 0644);
MODULE_PARM_DESC(retry_time, "time in seconds to retry failed I/O");

64 65 66 67
static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

M
Matthew Wilcox 已提交
68 69 70
static int nvme_major;
module_param(nvme_major, int, 0);

71 72 73
static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

74 75 76
static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;
K
Keith Busch 已提交
77
static struct workqueue_struct *nvme_workq;
78
static wait_queue_head_t nvme_kthread_wait;
79
static struct notifier_block nvme_nb;
80

81
static void nvme_reset_failed_dev(struct work_struct *ws);
M
Matias Bjørling 已提交
82
static int nvme_process_cq(struct nvme_queue *nvmeq);
83

K
Keith Busch 已提交
84 85 86
struct async_cmd_info {
	struct kthread_work work;
	struct kthread_worker *worker;
M
Matias Bjørling 已提交
87
	struct request *req;
K
Keith Busch 已提交
88 89 90 91
	u32 result;
	int status;
	void *ctx;
};
92

M
Matthew Wilcox 已提交
93 94 95 96 97
/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
K
Keith Busch 已提交
98
	struct llist_node node;
M
Matthew Wilcox 已提交
99
	struct device *q_dmadev;
M
Matthew Wilcox 已提交
100
	struct nvme_dev *dev;
101
	char irqname[24];	/* nvme4294967295-65535\0 */
M
Matthew Wilcox 已提交
102 103 104 105 106 107 108 109 110 111 112
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	volatile struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	u32 __iomem *q_db;
	u16 q_depth;
	u16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
K
Keith Busch 已提交
113
	u16 qid;
114 115
	u8 cq_phase;
	u8 cqe_seen;
K
Keith Busch 已提交
116
	struct async_cmd_info cmdinfo;
M
Matias Bjørling 已提交
117
	struct blk_mq_hw_ctx *hctx;
M
Matthew Wilcox 已提交
118 119 120 121 122 123 124 125 126 127 128 129
};

/*
 * Check we didin't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
130
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
K
Keith Busch 已提交
131
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
M
Matthew Wilcox 已提交
132 133 134 135
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
K
Keith Busch 已提交
136
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
M
Matthew Wilcox 已提交
137 138
}

139
typedef void (*nvme_completion_fn)(struct nvme_queue *, void *,
140 141
						struct nvme_completion *);

142
struct nvme_cmd_info {
143 144
	nvme_completion_fn fn;
	void *ctx;
K
Keith Busch 已提交
145
	int aborted;
M
Matias Bjørling 已提交
146
	struct nvme_queue *nvmeq;
147 148
};

M
Matias Bjørling 已提交
149 150
static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				unsigned int hctx_idx)
151
{
M
Matias Bjørling 已提交
152 153 154 155 156 157 158
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = dev->queues[0];

	WARN_ON(nvmeq->hctx);
	nvmeq->hctx = hctx;
	hctx->driver_data = nvmeq;
	return 0;
159 160
}

M
Matias Bjørling 已提交
161 162 163
static int nvme_admin_init_request(void *data, struct request *req,
				unsigned int hctx_idx, unsigned int rq_idx,
				unsigned int numa_node)
164
{
M
Matias Bjørling 已提交
165 166 167 168 169 170 171
	struct nvme_dev *dev = data;
	struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = dev->queues[0];

	BUG_ON(!nvmeq);
	cmd->nvmeq = nvmeq;
	return 0;
172 173
}

J
Jens Axboe 已提交
174 175 176 177 178 179 180
static void nvme_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	nvmeq->hctx = NULL;
}

M
Matias Bjørling 已提交
181 182
static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int hctx_idx)
M
Matthew Wilcox 已提交
183
{
M
Matias Bjørling 已提交
184 185 186
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = dev->queues[
					(hctx_idx % dev->queue_count) + 1];
M
Matthew Wilcox 已提交
187

M
Matias Bjørling 已提交
188 189 190 191 192 193
	if (!nvmeq->hctx)
		nvmeq->hctx = hctx;

	/* nvmeq queues are shared between namespaces. We assume here that
	 * blk-mq map the tags so they match up with the nvme queue tags. */
	WARN_ON(nvmeq->hctx->tags != hctx->tags);
M
Matthew Wilcox 已提交
194

M
Matias Bjørling 已提交
195 196
	hctx->driver_data = nvmeq;
	return 0;
M
Matthew Wilcox 已提交
197 198
}

M
Matias Bjørling 已提交
199 200 201
static int nvme_init_request(void *data, struct request *req,
				unsigned int hctx_idx, unsigned int rq_idx,
				unsigned int numa_node)
M
Matthew Wilcox 已提交
202
{
M
Matias Bjørling 已提交
203 204 205 206 207 208 209 210 211 212 213 214 215 216 217
	struct nvme_dev *dev = data;
	struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];

	BUG_ON(!nvmeq);
	cmd->nvmeq = nvmeq;
	return 0;
}

static void nvme_set_info(struct nvme_cmd_info *cmd, void *ctx,
				nvme_completion_fn handler)
{
	cmd->fn = handler;
	cmd->ctx = ctx;
	cmd->aborted = 0;
K
Keith Busch 已提交
218
	blk_mq_start_request(blk_mq_rq_from_pdu(cmd));
M
Matthew Wilcox 已提交
219 220
}

221 222
/* Special values must be less than 0x1000 */
#define CMD_CTX_BASE		((void *)POISON_POINTER_DELTA)
223 224 225
#define CMD_CTX_CANCELLED	(0x30C + CMD_CTX_BASE)
#define CMD_CTX_COMPLETED	(0x310 + CMD_CTX_BASE)
#define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)
226

227
static void special_completion(struct nvme_queue *nvmeq, void *ctx,
228 229 230 231 232
						struct nvme_completion *cqe)
{
	if (ctx == CMD_CTX_CANCELLED)
		return;
	if (ctx == CMD_CTX_COMPLETED) {
233
		dev_warn(nvmeq->q_dmadev,
234 235 236 237 238
				"completed id %d twice on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}
	if (ctx == CMD_CTX_INVALID) {
239
		dev_warn(nvmeq->q_dmadev,
240 241 242 243
				"invalid id %d completed on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}
244
	dev_warn(nvmeq->q_dmadev, "Unknown special completion %p\n", ctx);
245 246
}

M
Matias Bjørling 已提交
247
static void *cancel_cmd_info(struct nvme_cmd_info *cmd, nvme_completion_fn *fn)
M
Matthew Wilcox 已提交
248
{
249
	void *ctx;
M
Matthew Wilcox 已提交
250

251
	if (fn)
M
Matias Bjørling 已提交
252 253 254 255
		*fn = cmd->fn;
	ctx = cmd->ctx;
	cmd->fn = special_completion;
	cmd->ctx = CMD_CTX_CANCELLED;
256
	return ctx;
M
Matthew Wilcox 已提交
257 258
}

M
Matias Bjørling 已提交
259 260
static void async_req_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
261
{
M
Matias Bjørling 已提交
262
	struct request *req = ctx;
263

M
Matias Bjørling 已提交
264 265 266 267 268 269 270 271 272
	u32 result = le32_to_cpup(&cqe->result);
	u16 status = le16_to_cpup(&cqe->status) >> 1;

	if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ)
		++nvmeq->dev->event_limit;
	if (status == NVME_SC_SUCCESS)
		dev_warn(nvmeq->q_dmadev,
			"async event result %08x\n", result);

273
	blk_mq_free_hctx_request(nvmeq->hctx, req);
M
Matthew Wilcox 已提交
274 275
}

M
Matias Bjørling 已提交
276 277
static void abort_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
278
{
M
Matias Bjørling 已提交
279 280 281 282
	struct request *req = ctx;

	u16 status = le16_to_cpup(&cqe->status) >> 1;
	u32 result = le32_to_cpup(&cqe->result);
283

284
	blk_mq_free_hctx_request(nvmeq->hctx, req);
285

M
Matias Bjørling 已提交
286 287
	dev_warn(nvmeq->q_dmadev, "Abort status:%x result:%x", status, result);
	++nvmeq->dev->abort_limit;
288 289
}

M
Matias Bjørling 已提交
290 291
static void async_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
M
Matthew Wilcox 已提交
292
{
M
Matias Bjørling 已提交
293 294 295 296
	struct async_cmd_info *cmdinfo = ctx;
	cmdinfo->result = le32_to_cpup(&cqe->result);
	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
	queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
297
	blk_mq_free_hctx_request(nvmeq->hctx, cmdinfo->req);
M
Matthew Wilcox 已提交
298 299
}

M
Matias Bjørling 已提交
300 301
static inline struct nvme_cmd_info *get_cmd_from_tag(struct nvme_queue *nvmeq,
				  unsigned int tag)
M
Matthew Wilcox 已提交
302
{
M
Matias Bjørling 已提交
303 304
	struct blk_mq_hw_ctx *hctx = nvmeq->hctx;
	struct request *req = blk_mq_tag_to_rq(hctx->tags, tag);
305

M
Matias Bjørling 已提交
306
	return blk_mq_rq_to_pdu(req);
307 308
}

M
Matias Bjørling 已提交
309 310 311 312 313
/*
 * Called with local interrupts disabled and the q_lock held.  May not sleep.
 */
static void *nvme_finish_cmd(struct nvme_queue *nvmeq, int tag,
						nvme_completion_fn *fn)
314
{
M
Matias Bjørling 已提交
315 316 317 318 319 320 321 322 323 324 325 326
	struct nvme_cmd_info *cmd = get_cmd_from_tag(nvmeq, tag);
	void *ctx;
	if (tag >= nvmeq->q_depth) {
		*fn = special_completion;
		return CMD_CTX_INVALID;
	}
	if (fn)
		*fn = cmd->fn;
	ctx = cmd->ctx;
	cmd->fn = special_completion;
	cmd->ctx = CMD_CTX_COMPLETED;
	return ctx;
M
Matthew Wilcox 已提交
327 328 329
}

/**
330
 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
M
Matthew Wilcox 已提交
331 332 333 334 335
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
M
Matias Bjørling 已提交
336
static int __nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
M
Matthew Wilcox 已提交
337
{
M
Matias Bjørling 已提交
338 339
	u16 tail = nvmeq->sq_tail;

M
Matthew Wilcox 已提交
340 341 342
	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	if (++tail == nvmeq->q_depth)
		tail = 0;
343
	writel(tail, nvmeq->q_db);
M
Matthew Wilcox 已提交
344 345 346 347 348
	nvmeq->sq_tail = tail;

	return 0;
}

M
Matias Bjørling 已提交
349 350 351 352 353 354 355 356 357 358
static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	unsigned long flags;
	int ret;
	spin_lock_irqsave(&nvmeq->q_lock, flags);
	ret = __nvme_submit_cmd(nvmeq, cmd);
	spin_unlock_irqrestore(&nvmeq->q_lock, flags);
	return ret;
}

359
static __le64 **iod_list(struct nvme_iod *iod)
360
{
361
	return ((void *)iod) + iod->offset;
362 363
}

364 365 366 367 368
/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
369
static int nvme_npages(unsigned size, struct nvme_dev *dev)
370
{
371 372
	unsigned nprps = DIV_ROUND_UP(size + dev->page_size, dev->page_size);
	return DIV_ROUND_UP(8 * nprps, dev->page_size - 8);
373
}
M
Matthew Wilcox 已提交
374

375
static struct nvme_iod *
376
nvme_alloc_iod(unsigned nseg, unsigned nbytes, struct nvme_dev *dev, gfp_t gfp)
M
Matthew Wilcox 已提交
377
{
378
	struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
379
				sizeof(__le64 *) * nvme_npages(nbytes, dev) +
380 381 382 383 384 385
				sizeof(struct scatterlist) * nseg, gfp);

	if (iod) {
		iod->offset = offsetof(struct nvme_iod, sg[nseg]);
		iod->npages = -1;
		iod->length = nbytes;
K
Keith Busch 已提交
386
		iod->nents = 0;
387
		iod->first_dma = 0ULL;
388 389 390
	}

	return iod;
M
Matthew Wilcox 已提交
391 392
}

V
Vishal Verma 已提交
393
void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
M
Matthew Wilcox 已提交
394
{
395
	const int last_prp = dev->page_size / 8 - 1;
396 397 398 399 400 401 402 403 404 405 406 407 408
	int i;
	__le64 **list = iod_list(iod);
	dma_addr_t prp_dma = iod->first_dma;

	if (iod->npages == 0)
		dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
	for (i = 0; i < iod->npages; i++) {
		__le64 *prp_list = list[i];
		dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
		dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
		prp_dma = next_prp_dma;
	}
	kfree(iod);
M
Matthew Wilcox 已提交
409 410
}

K
Keith Busch 已提交
411 412 413 414 415 416 417 418 419 420 421 422
static int nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return 0;
	case NVME_SC_CAP_EXCEEDED:
		return -ENOSPC;
	default:
		return -EIO;
	}
}

M
Matias Bjørling 已提交
423
static void req_completion(struct nvme_queue *nvmeq, void *ctx,
M
Matthew Wilcox 已提交
424 425
						struct nvme_completion *cqe)
{
426
	struct nvme_iod *iod = ctx;
M
Matias Bjørling 已提交
427 428 429
	struct request *req = iod->private;
	struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);

M
Matthew Wilcox 已提交
430 431
	u16 status = le16_to_cpup(&cqe->status) >> 1;

432
	if (unlikely(status)) {
M
Matias Bjørling 已提交
433 434 435 436
		if (!(status & NVME_SC_DNR || blk_noretry_request(req))
		    && (jiffies - req->start_time) < req->timeout) {
			blk_mq_requeue_request(req);
			blk_mq_kick_requeue_list(req->q);
437 438
			return;
		}
M
Matias Bjørling 已提交
439 440 441 442 443 444 445 446 447 448 449 450
		req->errors = nvme_error_status(status);
	} else
		req->errors = 0;

	if (cmd_rq->aborted)
		dev_warn(&nvmeq->dev->pci_dev->dev,
			"completing aborted command with status:%04x\n",
			status);

	if (iod->nents)
		dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg, iod->nents,
			rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
451
	nvme_free_iod(nvmeq->dev, iod);
K
Keith Busch 已提交
452

M
Matias Bjørling 已提交
453
	blk_mq_complete_request(req);
M
Matthew Wilcox 已提交
454 455
}

456
/* length is in bytes.  gfp flags indicates whether we may sleep. */
457 458
int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, int total_len,
								gfp_t gfp)
M
Matthew Wilcox 已提交
459
{
460
	struct dma_pool *pool;
461 462
	int length = total_len;
	struct scatterlist *sg = iod->sg;
M
Matthew Wilcox 已提交
463 464 465
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = offset_in_page(dma_addr);
466
	__le64 *prp_list;
467
	__le64 **list = iod_list(iod);
468
	dma_addr_t prp_dma;
469
	int nprps, i;
470
	u32 page_size = dev->page_size;
M
Matthew Wilcox 已提交
471

472
	length -= (page_size - offset);
M
Matthew Wilcox 已提交
473
	if (length <= 0)
474
		return total_len;
M
Matthew Wilcox 已提交
475

476
	dma_len -= (page_size - offset);
M
Matthew Wilcox 已提交
477
	if (dma_len) {
478
		dma_addr += (page_size - offset);
M
Matthew Wilcox 已提交
479 480 481 482 483 484
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

485
	if (length <= page_size) {
486
		iod->first_dma = dma_addr;
487
		return total_len;
488 489
	}

490
	nprps = DIV_ROUND_UP(length, page_size);
491 492
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
493
		iod->npages = 0;
494 495
	} else {
		pool = dev->prp_page_pool;
496
		iod->npages = 1;
497 498
	}

499 500
	prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
	if (!prp_list) {
501
		iod->first_dma = dma_addr;
502
		iod->npages = -1;
503
		return (total_len - length) + page_size;
504
	}
505 506
	list[0] = prp_list;
	iod->first_dma = prp_dma;
507 508
	i = 0;
	for (;;) {
509
		if (i == page_size >> 3) {
510
			__le64 *old_prp_list = prp_list;
511
			prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
512 513 514
			if (!prp_list)
				return total_len - length;
			list[iod->npages++] = prp_list;
515 516 517
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
518 519
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
520 521 522
		dma_len -= page_size;
		dma_addr += page_size;
		length -= page_size;
523 524 525 526 527 528 529 530
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		BUG_ON(dma_len < 0);
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
M
Matthew Wilcox 已提交
531 532
	}

533
	return total_len;
M
Matthew Wilcox 已提交
534 535
}

M
Matias Bjørling 已提交
536 537 538 539 540 541 542
/*
 * We reuse the small pool to allocate the 16-byte range here as it is not
 * worth having a special pool for these or additional cases to handle freeing
 * the iod.
 */
static void nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
		struct request *req, struct nvme_iod *iod)
543
{
544 545
	struct nvme_dsm_range *range =
				(struct nvme_dsm_range *)iod_list(iod)[0];
546 547 548
	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	range->cattr = cpu_to_le32(0);
M
Matias Bjørling 已提交
549 550
	range->nlb = cpu_to_le32(blk_rq_bytes(req) >> ns->lba_shift);
	range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
551 552 553

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
M
Matias Bjørling 已提交
554
	cmnd->dsm.command_id = req->tag;
555 556 557 558 559 560 561 562 563 564
	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
	cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
	cmnd->dsm.nr = 0;
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);
}

M
Matias Bjørling 已提交
565
static void nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
M
Matthew Wilcox 已提交
566 567 568 569 570 571 572 573 574 575 576 577 578 579
								int cmdid)
{
	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.command_id = cmdid;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);
}

M
Matias Bjørling 已提交
580 581
static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
							struct nvme_ns *ns)
M
Matthew Wilcox 已提交
582
{
M
Matias Bjørling 已提交
583
	struct request *req = iod->private;
M
Matthew Wilcox 已提交
584
	struct nvme_command *cmnd;
M
Matias Bjørling 已提交
585 586
	u16 control = 0;
	u32 dsmgmt = 0;
M
Matthew Wilcox 已提交
587

M
Matias Bjørling 已提交
588
	if (req->cmd_flags & REQ_FUA)
M
Matthew Wilcox 已提交
589
		control |= NVME_RW_FUA;
M
Matias Bjørling 已提交
590
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
M
Matthew Wilcox 已提交
591 592
		control |= NVME_RW_LR;

M
Matias Bjørling 已提交
593
	if (req->cmd_flags & REQ_RAHEAD)
M
Matthew Wilcox 已提交
594 595
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

M
Matthew Wilcox 已提交
596
	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
597
	memset(cmnd, 0, sizeof(*cmnd));
M
Matthew Wilcox 已提交
598

M
Matias Bjørling 已提交
599 600
	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.command_id = req->tag;
M
Matthew Wilcox 已提交
601
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
602 603
	cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
	cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
M
Matias Bjørling 已提交
604 605
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
M
Matthew Wilcox 已提交
606 607
	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
M
Matthew Wilcox 已提交
608 609 610

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
611
	writel(nvmeq->sq_tail, nvmeq->q_db);
M
Matthew Wilcox 已提交
612

613
	return 0;
614 615
}

M
Matias Bjørling 已提交
616 617
static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
618
{
M
Matias Bjørling 已提交
619 620 621 622
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_queue *nvmeq = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
623
	struct nvme_iod *iod;
M
Matias Bjørling 已提交
624 625 626
	int psegs = req->nr_phys_segments;
	enum dma_data_direction dma_dir;
	unsigned size = !(req->cmd_flags & REQ_DISCARD) ? blk_rq_bytes(req) :
627
						sizeof(struct nvme_dsm_range);
628

629
	iod = nvme_alloc_iod(psegs, size, ns->dev, GFP_ATOMIC);
630
	if (!iod)
631
		return BLK_MQ_RQ_QUEUE_BUSY;
M
Matias Bjørling 已提交
632 633

	iod->private = req;
634

M
Matias Bjørling 已提交
635
	if (req->cmd_flags & REQ_DISCARD) {
636 637 638 639 640 641 642 643 644
		void *range;
		/*
		 * We reuse the small pool to allocate the 16-byte range here
		 * as it is not worth having a special pool for these or
		 * additional cases to handle freeing the iod.
		 */
		range = dma_pool_alloc(nvmeq->dev->prp_small_pool,
						GFP_ATOMIC,
						&iod->first_dma);
M
Matias Bjørling 已提交
645
		if (!range)
646
			goto retry_cmd;
647 648 649
		iod_list(iod)[0] = (__le64 *)range;
		iod->npages = 0;
	} else if (psegs) {
M
Matias Bjørling 已提交
650 651 652 653
		dma_dir = rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

		sg_init_table(iod->sg, psegs);
		iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
654 655
		if (!iod->nents)
			goto error_cmd;
M
Matias Bjørling 已提交
656 657

		if (!dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir))
658
			goto retry_cmd;
M
Matias Bjørling 已提交
659

660 661 662 663 664 665
		if (blk_rq_bytes(req) !=
                    nvme_setup_prps(nvmeq->dev, iod, blk_rq_bytes(req), GFP_ATOMIC)) {
			dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg,
					iod->nents, dma_dir);
			goto retry_cmd;
		}
666
	}
667

K
Keith Busch 已提交
668
	nvme_set_info(cmd, iod, req_completion);
M
Matias Bjørling 已提交
669 670 671 672 673 674 675 676 677 678 679 680
	spin_lock_irq(&nvmeq->q_lock);
	if (req->cmd_flags & REQ_DISCARD)
		nvme_submit_discard(nvmeq, ns, req, iod);
	else if (req->cmd_flags & REQ_FLUSH)
		nvme_submit_flush(nvmeq, ns, req->tag);
	else
		nvme_submit_iod(nvmeq, iod, ns);

	nvme_process_cq(nvmeq);
	spin_unlock_irq(&nvmeq->q_lock);
	return BLK_MQ_RQ_QUEUE_OK;

681 682 683 684
 error_cmd:
	nvme_free_iod(nvmeq->dev, iod);
	return BLK_MQ_RQ_QUEUE_ERROR;
 retry_cmd:
685
	nvme_free_iod(nvmeq->dev, iod);
686
	return BLK_MQ_RQ_QUEUE_BUSY;
M
Matthew Wilcox 已提交
687 688
}

689
static int nvme_process_cq(struct nvme_queue *nvmeq)
M
Matthew Wilcox 已提交
690
{
M
Matthew Wilcox 已提交
691
	u16 head, phase;
M
Matthew Wilcox 已提交
692 693

	head = nvmeq->cq_head;
M
Matthew Wilcox 已提交
694
	phase = nvmeq->cq_phase;
M
Matthew Wilcox 已提交
695 696

	for (;;) {
697 698
		void *ctx;
		nvme_completion_fn fn;
M
Matthew Wilcox 已提交
699
		struct nvme_completion cqe = nvmeq->cqes[head];
M
Matthew Wilcox 已提交
700
		if ((le16_to_cpu(cqe.status) & 1) != phase)
M
Matthew Wilcox 已提交
701 702 703 704
			break;
		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
		if (++head == nvmeq->q_depth) {
			head = 0;
M
Matthew Wilcox 已提交
705
			phase = !phase;
M
Matthew Wilcox 已提交
706
		}
M
Matias Bjørling 已提交
707
		ctx = nvme_finish_cmd(nvmeq, cqe.command_id, &fn);
708
		fn(nvmeq, ctx, &cqe);
M
Matthew Wilcox 已提交
709 710 711 712 713 714 715 716
	}

	/* If the controller ignores the cq head doorbell and continuously
	 * writes to the queue, it is theoretically possible to wrap around
	 * the queue twice and mistakenly return IRQ_NONE.  Linux only
	 * requires that 0.1% of your interrupts are handled, so this isn't
	 * a big problem.
	 */
M
Matthew Wilcox 已提交
717
	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
718
		return 0;
M
Matthew Wilcox 已提交
719

720
	writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
M
Matthew Wilcox 已提交
721
	nvmeq->cq_head = head;
M
Matthew Wilcox 已提交
722
	nvmeq->cq_phase = phase;
M
Matthew Wilcox 已提交
723

724 725
	nvmeq->cqe_seen = 1;
	return 1;
M
Matthew Wilcox 已提交
726 727
}

M
Matias Bjørling 已提交
728 729 730 731
/* Admin queue isn't initialized as a request queue. If at some point this
 * happens anyway, make sure to notify the user */
static int nvme_admin_queue_rq(struct blk_mq_hw_ctx *hctx,
			       const struct blk_mq_queue_data *bd)
732
{
M
Matias Bjørling 已提交
733 734
	WARN_ON_ONCE(1);
	return BLK_MQ_RQ_QUEUE_ERROR;
735 736
}

M
Matthew Wilcox 已提交
737
static irqreturn_t nvme_irq(int irq, void *data)
738 739 740 741
{
	irqreturn_t result;
	struct nvme_queue *nvmeq = data;
	spin_lock(&nvmeq->q_lock);
742 743 744
	nvme_process_cq(nvmeq);
	result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE;
	nvmeq->cqe_seen = 0;
745 746 747 748 749 750 751 752 753 754 755 756 757
	spin_unlock(&nvmeq->q_lock);
	return result;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
	if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
		return IRQ_NONE;
	return IRQ_WAKE_THREAD;
}

M
Matias Bjørling 已提交
758 759
static void nvme_abort_cmd_info(struct nvme_queue *nvmeq, struct nvme_cmd_info *
								cmd_info)
760 761
{
	spin_lock_irq(&nvmeq->q_lock);
M
Matias Bjørling 已提交
762
	cancel_cmd_info(cmd_info, NULL);
763 764 765
	spin_unlock_irq(&nvmeq->q_lock);
}

766 767 768 769 770 771
struct sync_cmd_info {
	struct task_struct *task;
	u32 result;
	int status;
};

772
static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
773 774 775 776 777 778 779 780
						struct nvme_completion *cqe)
{
	struct sync_cmd_info *cmdinfo = ctx;
	cmdinfo->result = le32_to_cpup(&cqe->result);
	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
	wake_up_process(cmdinfo->task);
}

M
Matthew Wilcox 已提交
781 782 783 784
/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
M
Matias Bjørling 已提交
785
static int nvme_submit_sync_cmd(struct request *req, struct nvme_command *cmd,
V
Vishal Verma 已提交
786
						u32 *result, unsigned timeout)
M
Matthew Wilcox 已提交
787
{
M
Matias Bjørling 已提交
788
	int ret;
M
Matthew Wilcox 已提交
789
	struct sync_cmd_info cmdinfo;
M
Matias Bjørling 已提交
790 791
	struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = cmd_rq->nvmeq;
M
Matthew Wilcox 已提交
792 793 794 795

	cmdinfo.task = current;
	cmdinfo.status = -EINTR;

M
Matias Bjørling 已提交
796 797 798
	cmd->common.command_id = req->tag;

	nvme_set_info(cmd_rq, &cmdinfo, sync_completion);
M
Matthew Wilcox 已提交
799

800
	set_current_state(TASK_KILLABLE);
801 802
	ret = nvme_submit_cmd(nvmeq, cmd);
	if (ret) {
M
Matias Bjørling 已提交
803
		nvme_finish_cmd(nvmeq, req->tag, NULL);
804 805
		set_current_state(TASK_RUNNING);
	}
806
	ret = schedule_timeout(timeout);
M
Matthew Wilcox 已提交
807

808 809 810 811 812 813 814 815 816 817
	/*
	 * Ensure that sync_completion has either run, or that it will
	 * never run.
	 */
	nvme_abort_cmd_info(nvmeq, blk_mq_rq_to_pdu(req));

	/*
	 * We never got the completion
	 */
	if (cmdinfo.status == -EINTR)
818 819
		return -EINTR;

M
Matthew Wilcox 已提交
820 821 822 823 824 825
	if (result)
		*result = cmdinfo.result;

	return cmdinfo.status;
}

M
Matias Bjørling 已提交
826 827 828 829 830 831 832
static int nvme_submit_async_admin_req(struct nvme_dev *dev)
{
	struct nvme_queue *nvmeq = dev->queues[0];
	struct nvme_command c;
	struct nvme_cmd_info *cmd_info;
	struct request *req;

833
	req = blk_mq_alloc_request(dev->admin_q, WRITE, GFP_ATOMIC, false);
834 835
	if (IS_ERR(req))
		return PTR_ERR(req);
M
Matias Bjørling 已提交
836

K
Keith Busch 已提交
837
	req->cmd_flags |= REQ_NO_TIMEOUT;
M
Matias Bjørling 已提交
838 839 840 841 842 843 844 845 846 847 848
	cmd_info = blk_mq_rq_to_pdu(req);
	nvme_set_info(cmd_info, req, async_req_completion);

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_async_event;
	c.common.command_id = req->tag;

	return __nvme_submit_cmd(nvmeq, &c);
}

static int nvme_submit_admin_async_cmd(struct nvme_dev *dev,
K
Keith Busch 已提交
849 850 851
			struct nvme_command *cmd,
			struct async_cmd_info *cmdinfo, unsigned timeout)
{
M
Matias Bjørling 已提交
852 853 854
	struct nvme_queue *nvmeq = dev->queues[0];
	struct request *req;
	struct nvme_cmd_info *cmd_rq;
K
Keith Busch 已提交
855

M
Matias Bjørling 已提交
856
	req = blk_mq_alloc_request(dev->admin_q, WRITE, GFP_KERNEL, false);
857 858
	if (IS_ERR(req))
		return PTR_ERR(req);
M
Matias Bjørling 已提交
859 860 861 862 863

	req->timeout = timeout;
	cmd_rq = blk_mq_rq_to_pdu(req);
	cmdinfo->req = req;
	nvme_set_info(cmd_rq, cmdinfo, async_completion);
K
Keith Busch 已提交
864
	cmdinfo->status = -EINTR;
M
Matias Bjørling 已提交
865 866 867

	cmd->common.command_id = req->tag;

868
	return nvme_submit_cmd(nvmeq, cmd);
K
Keith Busch 已提交
869 870
}

871
static int __nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
M
Matias Bjørling 已提交
872
						u32 *result, unsigned timeout)
M
Matthew Wilcox 已提交
873
{
M
Matias Bjørling 已提交
874 875 876 877
	int res;
	struct request *req;

	req = blk_mq_alloc_request(dev->admin_q, WRITE, GFP_KERNEL, false);
878 879
	if (IS_ERR(req))
		return PTR_ERR(req);
M
Matias Bjørling 已提交
880
	res = nvme_submit_sync_cmd(req, cmd, result, timeout);
881
	blk_mq_free_request(req);
M
Matias Bjørling 已提交
882
	return res;
883 884
}

M
Matias Bjørling 已提交
885
int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
886 887
								u32 *result)
{
M
Matias Bjørling 已提交
888
	return __nvme_submit_admin_cmd(dev, cmd, result, ADMIN_TIMEOUT);
M
Matthew Wilcox 已提交
889 890
}

M
Matias Bjørling 已提交
891 892
int nvme_submit_io_cmd(struct nvme_dev *dev, struct nvme_ns *ns,
					struct nvme_command *cmd, u32 *result)
K
Keith Busch 已提交
893
{
M
Matias Bjørling 已提交
894 895 896 897 898
	int res;
	struct request *req;

	req = blk_mq_alloc_request(ns->queue, WRITE, (GFP_KERNEL|__GFP_WAIT),
									false);
899 900
	if (IS_ERR(req))
		return PTR_ERR(req);
M
Matias Bjørling 已提交
901
	res = nvme_submit_sync_cmd(req, cmd, result, NVME_IO_TIMEOUT);
902
	blk_mq_free_request(req);
M
Matias Bjørling 已提交
903
	return res;
K
Keith Busch 已提交
904 905
}

M
Matthew Wilcox 已提交
906 907 908 909 910 911 912 913
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

M
Matias Bjørling 已提交
914
	return nvme_submit_admin_cmd(dev, &c, NULL);
M
Matthew Wilcox 已提交
915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

M
Matias Bjørling 已提交
931
	return nvme_submit_admin_cmd(dev, &c, NULL);
M
Matthew Wilcox 已提交
932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

M
Matias Bjørling 已提交
948
	return nvme_submit_admin_cmd(dev, &c, NULL);
M
Matthew Wilcox 已提交
949 950 951 952 953 954 955 956 957 958 959 960
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

V
Vishal Verma 已提交
961
int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
962 963 964 965 966 967 968 969 970 971 972 973 974
							dma_addr_t dma_addr)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.prp1 = cpu_to_le64(dma_addr);
	c.identify.cns = cpu_to_le32(cns);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

V
Vishal Verma 已提交
975
int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
976
					dma_addr_t dma_addr, u32 *result)
977 978 979 980 981
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
982
	c.features.nsid = cpu_to_le32(nsid);
983 984 985
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

986
	return nvme_submit_admin_cmd(dev, &c, result);
987 988
}

V
Vishal Verma 已提交
989 990
int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
					dma_addr_t dma_addr, u32 *result)
991 992 993 994 995 996 997 998 999
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

1000 1001 1002
	return nvme_submit_admin_cmd(dev, &c, result);
}

K
Keith Busch 已提交
1003
/**
M
Matias Bjørling 已提交
1004
 * nvme_abort_req - Attempt aborting a request
K
Keith Busch 已提交
1005 1006 1007 1008
 *
 * Schedule controller reset if the command was already aborted once before and
 * still hasn't been returned to the driver, or if this is the admin queue.
 */
M
Matias Bjørling 已提交
1009
static void nvme_abort_req(struct request *req)
K
Keith Busch 已提交
1010
{
M
Matias Bjørling 已提交
1011 1012
	struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = cmd_rq->nvmeq;
K
Keith Busch 已提交
1013
	struct nvme_dev *dev = nvmeq->dev;
M
Matias Bjørling 已提交
1014 1015 1016
	struct request *abort_req;
	struct nvme_cmd_info *abort_cmd;
	struct nvme_command cmd;
K
Keith Busch 已提交
1017

M
Matias Bjørling 已提交
1018
	if (!nvmeq->qid || cmd_rq->aborted) {
K
Keith Busch 已提交
1019 1020 1021 1022
		if (work_busy(&dev->reset_work))
			return;
		list_del_init(&dev->node);
		dev_warn(&dev->pci_dev->dev,
M
Matias Bjørling 已提交
1023 1024
			"I/O %d QID %d timeout, reset controller\n",
							req->tag, nvmeq->qid);
T
Tejun Heo 已提交
1025
		dev->reset_workfn = nvme_reset_failed_dev;
K
Keith Busch 已提交
1026 1027 1028 1029 1030 1031 1032
		queue_work(nvme_workq, &dev->reset_work);
		return;
	}

	if (!dev->abort_limit)
		return;

M
Matias Bjørling 已提交
1033 1034
	abort_req = blk_mq_alloc_request(dev->admin_q, WRITE, GFP_ATOMIC,
									false);
1035
	if (IS_ERR(abort_req))
K
Keith Busch 已提交
1036 1037
		return;

M
Matias Bjørling 已提交
1038 1039 1040
	abort_cmd = blk_mq_rq_to_pdu(abort_req);
	nvme_set_info(abort_cmd, abort_req, abort_completion);

K
Keith Busch 已提交
1041 1042
	memset(&cmd, 0, sizeof(cmd));
	cmd.abort.opcode = nvme_admin_abort_cmd;
M
Matias Bjørling 已提交
1043
	cmd.abort.cid = req->tag;
K
Keith Busch 已提交
1044
	cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
M
Matias Bjørling 已提交
1045
	cmd.abort.command_id = abort_req->tag;
K
Keith Busch 已提交
1046 1047

	--dev->abort_limit;
M
Matias Bjørling 已提交
1048
	cmd_rq->aborted = 1;
K
Keith Busch 已提交
1049

M
Matias Bjørling 已提交
1050
	dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", req->tag,
K
Keith Busch 已提交
1051
							nvmeq->qid);
M
Matias Bjørling 已提交
1052 1053 1054 1055
	if (nvme_submit_cmd(dev->queues[0], &cmd) < 0) {
		dev_warn(nvmeq->q_dmadev,
				"Could not abort I/O %d QID %d",
				req->tag, nvmeq->qid);
1056
		blk_mq_free_request(abort_req);
M
Matias Bjørling 已提交
1057
	}
K
Keith Busch 已提交
1058 1059
}

M
Matias Bjørling 已提交
1060 1061
static void nvme_cancel_queue_ios(struct blk_mq_hw_ctx *hctx,
				struct request *req, void *data, bool reserved)
1062
{
M
Matias Bjørling 已提交
1063 1064 1065 1066 1067 1068 1069
	struct nvme_queue *nvmeq = data;
	void *ctx;
	nvme_completion_fn fn;
	struct nvme_cmd_info *cmd;
	static struct nvme_completion cqe = {
		.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
	};
1070

M
Matias Bjørling 已提交
1071
	cmd = blk_mq_rq_to_pdu(req);
1072

M
Matias Bjørling 已提交
1073 1074 1075 1076 1077 1078 1079
	if (cmd->ctx == CMD_CTX_CANCELLED)
		return;

	dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n",
						req->tag, nvmeq->qid);
	ctx = cancel_cmd_info(cmd, &fn);
	fn(nvmeq, ctx, &cqe);
1080 1081
}

M
Matias Bjørling 已提交
1082
static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
1083
{
M
Matias Bjørling 已提交
1084 1085 1086 1087 1088
	struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = cmd->nvmeq;

	dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
							nvmeq->qid);
K
Keith Busch 已提交
1089 1090 1091 1092 1093 1094 1095 1096 1097 1098

	if (!nvmeq->dev->initialized) {
		/*
		 * Force cancelled command frees the request, which requires we
		 * return BLK_EH_NOT_HANDLED.
		 */
		nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved);
		return BLK_EH_NOT_HANDLED;
	}
	nvme_abort_req(req);
M
Matias Bjørling 已提交
1099 1100 1101 1102 1103 1104 1105 1106

	/*
	 * The aborted req will be completed on receiving the abort req.
	 * We enable the timer again. If hit twice, it'll cause a device reset,
	 * as the device then is in a faulty state.
	 */
	return BLK_EH_RESET_TIMER;
}
1107

M
Matias Bjørling 已提交
1108 1109
static void nvme_free_queue(struct nvme_queue *nvmeq)
{
1110 1111 1112 1113 1114 1115 1116
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
}

1117
static void nvme_free_queues(struct nvme_dev *dev, int lowest)
1118
{
K
Keith Busch 已提交
1119 1120 1121
	LLIST_HEAD(q_list);
	struct nvme_queue *nvmeq, *next;
	struct llist_node *entry;
1122 1123
	int i;

1124
	for (i = dev->queue_count - 1; i >= lowest; i--) {
M
Matias Bjørling 已提交
1125
		struct nvme_queue *nvmeq = dev->queues[i];
K
Keith Busch 已提交
1126
		llist_add(&nvmeq->node, &q_list);
1127
		dev->queue_count--;
M
Matias Bjørling 已提交
1128
		dev->queues[i] = NULL;
1129
	}
K
Keith Busch 已提交
1130 1131 1132 1133
	synchronize_rcu();
	entry = llist_del_all(&q_list);
	llist_for_each_entry_safe(nvmeq, next, entry, node)
		nvme_free_queue(nvmeq);
1134 1135
}

K
Keith Busch 已提交
1136 1137 1138 1139 1140
/**
 * nvme_suspend_queue - put queue into suspended state
 * @nvmeq - queue to suspend
 */
static int nvme_suspend_queue(struct nvme_queue *nvmeq)
M
Matthew Wilcox 已提交
1141
{
K
Keith Busch 已提交
1142
	int vector;
M
Matthew Wilcox 已提交
1143

1144
	spin_lock_irq(&nvmeq->q_lock);
K
Keith Busch 已提交
1145 1146 1147 1148 1149
	if (nvmeq->cq_vector == -1) {
		spin_unlock_irq(&nvmeq->q_lock);
		return 1;
	}
	vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
K
Keith Busch 已提交
1150
	nvmeq->dev->online_queues--;
K
Keith Busch 已提交
1151
	nvmeq->cq_vector = -1;
1152 1153
	spin_unlock_irq(&nvmeq->q_lock);

M
Matthew Wilcox 已提交
1154 1155
	irq_set_affinity_hint(vector, NULL);
	free_irq(vector, nvmeq);
M
Matthew Wilcox 已提交
1156

K
Keith Busch 已提交
1157 1158
	return 0;
}
M
Matthew Wilcox 已提交
1159

K
Keith Busch 已提交
1160 1161
static void nvme_clear_queue(struct nvme_queue *nvmeq)
{
M
Matias Bjørling 已提交
1162 1163
	struct blk_mq_hw_ctx *hctx = nvmeq->hctx;

1164 1165
	spin_lock_irq(&nvmeq->q_lock);
	nvme_process_cq(nvmeq);
M
Matias Bjørling 已提交
1166 1167
	if (hctx && hctx->tags)
		blk_mq_tag_busy_iter(hctx, nvme_cancel_queue_ios, nvmeq);
1168
	spin_unlock_irq(&nvmeq->q_lock);
M
Matthew Wilcox 已提交
1169 1170
}

K
Keith Busch 已提交
1171 1172
static void nvme_disable_queue(struct nvme_dev *dev, int qid)
{
M
Matias Bjørling 已提交
1173
	struct nvme_queue *nvmeq = dev->queues[qid];
K
Keith Busch 已提交
1174 1175 1176 1177 1178 1179

	if (!nvmeq)
		return;
	if (nvme_suspend_queue(nvmeq))
		return;

K
Keith Busch 已提交
1180 1181 1182
	/* Don't tell the adapter to delete the admin queue.
	 * Don't tell a removed adapter to delete IO queues. */
	if (qid && readl(&dev->bar->csts) != -1) {
M
Matthew Wilcox 已提交
1183 1184 1185
		adapter_delete_sq(dev, qid);
		adapter_delete_cq(dev, qid);
	}
K
Keith Busch 已提交
1186
	nvme_clear_queue(nvmeq);
M
Matthew Wilcox 已提交
1187 1188 1189
}

static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
K
Keith Busch 已提交
1190
							int depth)
M
Matthew Wilcox 已提交
1191 1192
{
	struct device *dmadev = &dev->pci_dev->dev;
M
Matias Bjørling 已提交
1193
	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL);
M
Matthew Wilcox 已提交
1194 1195 1196
	if (!nvmeq)
		return NULL;

J
Joe Perches 已提交
1197 1198
	nvmeq->cqes = dma_zalloc_coherent(dmadev, CQ_SIZE(depth),
					  &nvmeq->cq_dma_addr, GFP_KERNEL);
M
Matthew Wilcox 已提交
1199 1200 1201 1202 1203 1204 1205 1206 1207
	if (!nvmeq->cqes)
		goto free_nvmeq;

	nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
					&nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		goto free_cqdma;

	nvmeq->q_dmadev = dmadev;
M
Matthew Wilcox 已提交
1208
	nvmeq->dev = dev;
1209 1210
	snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
			dev->instance, qid);
M
Matthew Wilcox 已提交
1211 1212
	spin_lock_init(&nvmeq->q_lock);
	nvmeq->cq_head = 0;
M
Matthew Wilcox 已提交
1213
	nvmeq->cq_phase = 1;
1214
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
M
Matthew Wilcox 已提交
1215
	nvmeq->q_depth = depth;
K
Keith Busch 已提交
1216
	nvmeq->qid = qid;
1217
	dev->queue_count++;
M
Matias Bjørling 已提交
1218
	dev->queues[qid] = nvmeq;
M
Matthew Wilcox 已提交
1219 1220 1221 1222

	return nvmeq;

 free_cqdma:
1223
	dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
M
Matthew Wilcox 已提交
1224 1225 1226 1227 1228 1229
							nvmeq->cq_dma_addr);
 free_nvmeq:
	kfree(nvmeq);
	return NULL;
}

1230 1231 1232
static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
							const char *name)
{
1233 1234
	if (use_threaded_interrupts)
		return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
1235
					nvme_irq_check, nvme_irq, IRQF_SHARED,
1236
					name, nvmeq);
1237
	return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
1238
				IRQF_SHARED, name, nvmeq);
1239 1240
}

1241
static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
M
Matthew Wilcox 已提交
1242
{
1243
	struct nvme_dev *dev = nvmeq->dev;
M
Matthew Wilcox 已提交
1244

1245
	spin_lock_irq(&nvmeq->q_lock);
1246 1247 1248
	nvmeq->sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
1249
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1250
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
K
Keith Busch 已提交
1251
	dev->online_queues++;
1252
	spin_unlock_irq(&nvmeq->q_lock);
1253 1254 1255 1256 1257 1258
}

static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
{
	struct nvme_dev *dev = nvmeq->dev;
	int result;
1259

K
Keith Busch 已提交
1260
	nvmeq->cq_vector = qid - 1;
M
Matthew Wilcox 已提交
1261 1262
	result = adapter_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
1263
		return result;
M
Matthew Wilcox 已提交
1264 1265 1266 1267 1268

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

1269
	result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
M
Matthew Wilcox 已提交
1270 1271 1272
	if (result < 0)
		goto release_sq;

1273 1274
	nvme_init_queue(nvmeq, qid);
	return result;
M
Matthew Wilcox 已提交
1275 1276 1277 1278 1279

 release_sq:
	adapter_delete_sq(dev, qid);
 release_cq:
	adapter_delete_cq(dev, qid);
1280
	return result;
M
Matthew Wilcox 已提交
1281 1282
}

1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295
static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled)
{
	unsigned long timeout;
	u32 bit = enabled ? NVME_CSTS_RDY : 0;

	timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;

	while ((readl(&dev->bar->csts) & NVME_CSTS_RDY) != bit) {
		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(&dev->pci_dev->dev,
M
Matthew Wilcox 已提交
1296 1297
				"Device not ready; aborting %s\n", enabled ?
						"initialisation" : "reset");
1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312
			return -ENODEV;
		}
	}

	return 0;
}

/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
static int nvme_disable_ctrl(struct nvme_dev *dev, u64 cap)
{
1313 1314 1315
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config &= ~NVME_CC_ENABLE;
	writel(dev->ctrl_config, &dev->bar->cc);
1316

1317 1318 1319 1320 1321
	return nvme_wait_ready(dev, cap, false);
}

static int nvme_enable_ctrl(struct nvme_dev *dev, u64 cap)
{
1322 1323 1324 1325
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config |= NVME_CC_ENABLE;
	writel(dev->ctrl_config, &dev->bar->cc);

1326 1327 1328
	return nvme_wait_ready(dev, cap, true);
}

K
Keith Busch 已提交
1329 1330 1331 1332
static int nvme_shutdown_ctrl(struct nvme_dev *dev)
{
	unsigned long timeout;

1333 1334 1335 1336
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config |= NVME_CC_SHN_NORMAL;

	writel(dev->ctrl_config, &dev->bar->cc);
K
Keith Busch 已提交
1337

1338
	timeout = SHUTDOWN_TIMEOUT + jiffies;
K
Keith Busch 已提交
1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353
	while ((readl(&dev->bar->csts) & NVME_CSTS_SHST_MASK) !=
							NVME_CSTS_SHST_CMPLT) {
		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(&dev->pci_dev->dev,
				"Device shutdown incomplete; abort shutdown\n");
			return -ENODEV;
		}
	}

	return 0;
}

M
Matias Bjørling 已提交
1354 1355 1356 1357
static struct blk_mq_ops nvme_mq_admin_ops = {
	.queue_rq	= nvme_admin_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.init_hctx	= nvme_admin_init_hctx,
J
Jens Axboe 已提交
1358
	.exit_hctx	= nvme_exit_hctx,
M
Matias Bjørling 已提交
1359 1360 1361 1362 1363 1364 1365 1366
	.init_request	= nvme_admin_init_request,
	.timeout	= nvme_timeout,
};

static struct blk_mq_ops nvme_mq_ops = {
	.queue_rq	= nvme_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.init_hctx	= nvme_init_hctx,
J
Jens Axboe 已提交
1367
	.exit_hctx	= nvme_exit_hctx,
M
Matias Bjørling 已提交
1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386
	.init_request	= nvme_init_request,
	.timeout	= nvme_timeout,
};

static int nvme_alloc_admin_tags(struct nvme_dev *dev)
{
	if (!dev->admin_q) {
		dev->admin_tagset.ops = &nvme_mq_admin_ops;
		dev->admin_tagset.nr_hw_queues = 1;
		dev->admin_tagset.queue_depth = NVME_AQ_DEPTH - 1;
		dev->admin_tagset.timeout = ADMIN_TIMEOUT;
		dev->admin_tagset.numa_node = dev_to_node(&dev->pci_dev->dev);
		dev->admin_tagset.cmd_size = sizeof(struct nvme_cmd_info);
		dev->admin_tagset.driver_data = dev;

		if (blk_mq_alloc_tag_set(&dev->admin_tagset))
			return -ENOMEM;

		dev->admin_q = blk_mq_init_queue(&dev->admin_tagset);
1387
		if (IS_ERR(dev->admin_q)) {
M
Matias Bjørling 已提交
1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401
			blk_mq_free_tag_set(&dev->admin_tagset);
			return -ENOMEM;
		}
	}

	return 0;
}

static void nvme_free_admin_tags(struct nvme_dev *dev)
{
	if (dev->admin_q)
		blk_mq_free_tag_set(&dev->admin_tagset);
}

1402
static int nvme_configure_admin_queue(struct nvme_dev *dev)
M
Matthew Wilcox 已提交
1403
{
1404
	int result;
M
Matthew Wilcox 已提交
1405
	u32 aqa;
1406
	u64 cap = readq(&dev->bar->cap);
M
Matthew Wilcox 已提交
1407
	struct nvme_queue *nvmeq;
1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425
	unsigned page_shift = PAGE_SHIFT;
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
	unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;

	if (page_shift < dev_page_min) {
		dev_err(&dev->pci_dev->dev,
				"Minimum device page size (%u) too large for "
				"host (%u)\n", 1 << dev_page_min,
				1 << page_shift);
		return -ENODEV;
	}
	if (page_shift > dev_page_max) {
		dev_info(&dev->pci_dev->dev,
				"Device maximum page size (%u) smaller than "
				"host (%u); enabling work-around\n",
				1 << dev_page_max, 1 << page_shift);
		page_shift = dev_page_max;
	}
M
Matthew Wilcox 已提交
1426

1427 1428 1429
	result = nvme_disable_ctrl(dev, cap);
	if (result < 0)
		return result;
M
Matthew Wilcox 已提交
1430

M
Matias Bjørling 已提交
1431
	nvmeq = dev->queues[0];
1432
	if (!nvmeq) {
K
Keith Busch 已提交
1433
		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
1434 1435 1436
		if (!nvmeq)
			return -ENOMEM;
	}
M
Matthew Wilcox 已提交
1437 1438 1439 1440

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

1441 1442
	dev->page_size = 1 << page_shift;

1443
	dev->ctrl_config = NVME_CC_CSS_NVM;
1444
	dev->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
M
Matthew Wilcox 已提交
1445
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
1446
	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
M
Matthew Wilcox 已提交
1447 1448 1449 1450 1451

	writel(aqa, &dev->bar->aqa);
	writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
	writeq(nvmeq->cq_dma_addr, &dev->bar->acq);

1452
	result = nvme_enable_ctrl(dev, cap);
1453
	if (result)
M
Matias Bjørling 已提交
1454 1455 1456 1457 1458
		goto free_nvmeq;

	result = nvme_alloc_admin_tags(dev);
	if (result)
		goto free_nvmeq;
1459

K
Keith Busch 已提交
1460
	nvmeq->cq_vector = 0;
1461
	result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
1462
	if (result)
M
Matias Bjørling 已提交
1463
		goto free_tags;
1464

M
Matthew Wilcox 已提交
1465
	return result;
M
Matias Bjørling 已提交
1466 1467 1468 1469 1470 1471

 free_tags:
	nvme_free_admin_tags(dev);
 free_nvmeq:
	nvme_free_queues(dev, 0);
	return result;
M
Matthew Wilcox 已提交
1472 1473
}

V
Vishal Verma 已提交
1474
struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
1475
				unsigned long addr, unsigned length)
M
Matthew Wilcox 已提交
1476
{
1477
	int i, err, count, nents, offset;
1478 1479
	struct scatterlist *sg;
	struct page **pages;
1480
	struct nvme_iod *iod;
1481 1482

	if (addr & 3)
1483
		return ERR_PTR(-EINVAL);
1484
	if (!length || length > INT_MAX - PAGE_SIZE)
1485
		return ERR_PTR(-EINVAL);
1486

1487
	offset = offset_in_page(addr);
1488 1489
	count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
	pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
1490 1491
	if (!pages)
		return ERR_PTR(-ENOMEM);
1492 1493 1494 1495 1496 1497 1498

	err = get_user_pages_fast(addr, count, 1, pages);
	if (err < count) {
		count = err;
		err = -EFAULT;
		goto put_pages;
	}
1499

1500
	err = -ENOMEM;
1501
	iod = nvme_alloc_iod(count, length, dev, GFP_KERNEL);
1502 1503 1504
	if (!iod)
		goto put_pages;

1505
	sg = iod->sg;
1506
	sg_init_table(sg, count);
1507 1508
	for (i = 0; i < count; i++) {
		sg_set_page(&sg[i], pages[i],
1509 1510
			    min_t(unsigned, length, PAGE_SIZE - offset),
			    offset);
1511 1512
		length -= (PAGE_SIZE - offset);
		offset = 0;
1513
	}
1514
	sg_mark_end(&sg[i - 1]);
1515
	iod->nents = count;
1516 1517 1518

	nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1519
	if (!nents)
1520
		goto free_iod;
M
Matthew Wilcox 已提交
1521

1522
	kfree(pages);
1523
	return iod;
M
Matthew Wilcox 已提交
1524

1525 1526
 free_iod:
	kfree(iod);
1527 1528 1529 1530
 put_pages:
	for (i = 0; i < count; i++)
		put_page(pages[i]);
	kfree(pages);
1531
	return ERR_PTR(err);
1532
}
M
Matthew Wilcox 已提交
1533

V
Vishal Verma 已提交
1534
void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
1535
			struct nvme_iod *iod)
1536
{
1537
	int i;
M
Matthew Wilcox 已提交
1538

1539 1540
	dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1541

1542 1543
	for (i = 0; i < iod->nents; i++)
		put_page(sg_page(&iod->sg[i]));
1544
}
M
Matthew Wilcox 已提交
1545

M
Matthew Wilcox 已提交
1546 1547 1548 1549 1550
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_user_io io;
	struct nvme_command c;
1551 1552 1553 1554 1555
	unsigned length, meta_len;
	int status, i;
	struct nvme_iod *iod, *meta_iod = NULL;
	dma_addr_t meta_dma_addr;
	void *meta, *uninitialized_var(meta_mem);
M
Matthew Wilcox 已提交
1556 1557 1558

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
1559
	length = (io.nblocks + 1) << ns->lba_shift;
1560 1561 1562 1563
	meta_len = (io.nblocks + 1) * ns->ms;

	if (meta_len && ((io.metadata & 3) || !io.metadata))
		return -EINVAL;
1564 1565 1566 1567

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
M
Matthew Wilcox 已提交
1568
	case nvme_cmd_compare:
1569
		iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
M
Matthew Wilcox 已提交
1570
		break;
1571
	default:
M
Matthew Wilcox 已提交
1572
		return -EINVAL;
1573 1574
	}

1575 1576
	if (IS_ERR(iod))
		return PTR_ERR(iod);
M
Matthew Wilcox 已提交
1577 1578 1579 1580

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
1581
	c.rw.nsid = cpu_to_le32(ns->ns_id);
M
Matthew Wilcox 已提交
1582
	c.rw.slba = cpu_to_le64(io.slba);
1583
	c.rw.length = cpu_to_le16(io.nblocks);
M
Matthew Wilcox 已提交
1584
	c.rw.control = cpu_to_le16(io.control);
1585 1586 1587 1588
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);
1589 1590

	if (meta_len) {
K
Keith Busch 已提交
1591 1592
		meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata,
								meta_len);
1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621
		if (IS_ERR(meta_iod)) {
			status = PTR_ERR(meta_iod);
			meta_iod = NULL;
			goto unmap;
		}

		meta_mem = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
						&meta_dma_addr, GFP_KERNEL);
		if (!meta_mem) {
			status = -ENOMEM;
			goto unmap;
		}

		if (io.opcode & 1) {
			int meta_offset = 0;

			for (i = 0; i < meta_iod->nents; i++) {
				meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
						meta_iod->sg[i].offset;
				memcpy(meta_mem + meta_offset, meta,
						meta_iod->sg[i].length);
				kunmap_atomic(meta);
				meta_offset += meta_iod->sg[i].length;
			}
		}

		c.rw.metadata = cpu_to_le64(meta_dma_addr);
	}

1622 1623 1624
	length = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
	c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
	c.rw.prp2 = cpu_to_le64(iod->first_dma);
M
Matthew Wilcox 已提交
1625

1626 1627 1628
	if (length != (io.nblocks + 1) << ns->lba_shift)
		status = -ENOMEM;
	else
M
Matias Bjørling 已提交
1629
		status = nvme_submit_io_cmd(dev, ns, &c, NULL);
M
Matthew Wilcox 已提交
1630

1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649
	if (meta_len) {
		if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
			int meta_offset = 0;

			for (i = 0; i < meta_iod->nents; i++) {
				meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
						meta_iod->sg[i].offset;
				memcpy(meta, meta_mem + meta_offset,
						meta_iod->sg[i].length);
				kunmap_atomic(meta);
				meta_offset += meta_iod->sg[i].length;
			}
		}

		dma_free_coherent(&dev->pci_dev->dev, meta_len, meta_mem,
								meta_dma_addr);
	}

 unmap:
1650
	nvme_unmap_user_pages(dev, io.opcode & 1, iod);
1651
	nvme_free_iod(dev, iod);
1652 1653 1654 1655 1656 1657

	if (meta_iod) {
		nvme_unmap_user_pages(dev, io.opcode & 1, meta_iod);
		nvme_free_iod(dev, meta_iod);
	}

M
Matthew Wilcox 已提交
1658 1659 1660
	return status;
}

M
Matias Bjørling 已提交
1661 1662
static int nvme_user_cmd(struct nvme_dev *dev, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
1663
{
1664
	struct nvme_passthru_cmd cmd;
1665
	struct nvme_command c;
1666
	int status, length;
1667
	struct nvme_iod *uninitialized_var(iod);
1668
	unsigned timeout;
1669

M
Matthew Wilcox 已提交
1670 1671 1672
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
1673 1674 1675
		return -EFAULT;

	memset(&c, 0, sizeof(c));
M
Matthew Wilcox 已提交
1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	length = cmd.data_len;
	if (cmd.data_len) {
1690 1691
		iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr,
								length);
1692 1693
		if (IS_ERR(iod))
			return PTR_ERR(iod);
1694 1695 1696
		length = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
		c.common.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
		c.common.prp2 = cpu_to_le64(iod->first_dma);
M
Matthew Wilcox 已提交
1697 1698
	}

1699 1700
	timeout = cmd.timeout_ms ? msecs_to_jiffies(cmd.timeout_ms) :
								ADMIN_TIMEOUT;
M
Matias Bjørling 已提交
1701

M
Matthew Wilcox 已提交
1702
	if (length != cmd.data_len)
1703
		status = -ENOMEM;
M
Matias Bjørling 已提交
1704 1705 1706 1707 1708
	else if (ns) {
		struct request *req;

		req = blk_mq_alloc_request(ns->queue, WRITE,
						(GFP_KERNEL|__GFP_WAIT), false);
1709 1710
		if (IS_ERR(req))
			status = PTR_ERR(req);
M
Matias Bjørling 已提交
1711 1712 1713
		else {
			status = nvme_submit_sync_cmd(req, &c, &cmd.result,
								timeout);
1714
			blk_mq_free_request(req);
M
Matias Bjørling 已提交
1715 1716 1717
		}
	} else
		status = __nvme_submit_admin_cmd(dev, &c, &cmd.result, timeout);
1718

M
Matthew Wilcox 已提交
1719
	if (cmd.data_len) {
1720
		nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
1721
		nvme_free_iod(dev, iod);
M
Matthew Wilcox 已提交
1722
	}
1723

1724
	if ((status >= 0) && copy_to_user(&ucmd->result, &cmd.result,
1725 1726 1727
							sizeof(cmd.result)))
		status = -EFAULT;

1728 1729 1730
	return status;
}

M
Matthew Wilcox 已提交
1731 1732 1733 1734 1735 1736
static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
							unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
M
Matthew Wilcox 已提交
1737
	case NVME_IOCTL_ID:
1738
		force_successful_syscall_return();
M
Matthew Wilcox 已提交
1739 1740
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
M
Matias Bjørling 已提交
1741
		return nvme_user_cmd(ns->dev, NULL, (void __user *)arg);
1742
	case NVME_IOCTL_IO_CMD:
M
Matias Bjørling 已提交
1743
		return nvme_user_cmd(ns->dev, ns, (void __user *)arg);
M
Matthew Wilcox 已提交
1744 1745
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
V
Vishal Verma 已提交
1746 1747 1748 1749
	case SG_GET_VERSION_NUM:
		return nvme_sg_get_version_num((void __user *)arg);
	case SG_IO:
		return nvme_sg_io(ns, (void __user *)arg);
M
Matthew Wilcox 已提交
1750 1751 1752 1753 1754
	default:
		return -ENOTTY;
	}
}

K
Keith Busch 已提交
1755 1756 1757 1758 1759 1760
#ifdef CONFIG_COMPAT
static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
					unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case SG_IO:
1761
		return -ENOIOCTLCMD;
K
Keith Busch 已提交
1762 1763 1764 1765 1766 1767 1768
	}
	return nvme_ioctl(bdev, mode, cmd, arg);
}
#else
#define nvme_compat_ioctl	NULL
#endif

1769 1770
static int nvme_open(struct block_device *bdev, fmode_t mode)
{
1771 1772
	int ret = 0;
	struct nvme_ns *ns;
1773

1774 1775 1776 1777 1778 1779 1780 1781 1782
	spin_lock(&dev_list_lock);
	ns = bdev->bd_disk->private_data;
	if (!ns)
		ret = -ENXIO;
	else if (!kref_get_unless_zero(&ns->dev->kref))
		ret = -ENXIO;
	spin_unlock(&dev_list_lock);

	return ret;
1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794
}

static void nvme_free_dev(struct kref *kref);

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	struct nvme_ns *ns = disk->private_data;
	struct nvme_dev *dev = ns->dev;

	kref_put(&dev->kref, nvme_free_dev);
}

K
Keith Busch 已提交
1795 1796 1797 1798 1799 1800 1801 1802 1803
static int nvme_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	return 0;
}

K
Keith Busch 已提交
1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832
static int nvme_revalidate_disk(struct gendisk *disk)
{
	struct nvme_ns *ns = disk->private_data;
	struct nvme_dev *dev = ns->dev;
	struct nvme_id_ns *id;
	dma_addr_t dma_addr;
	int lbaf;

	id = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
								GFP_KERNEL);
	if (!id) {
		dev_warn(&dev->pci_dev->dev, "%s: Memory alocation failure\n",
								__func__);
		return 0;
	}

	if (nvme_identify(dev, ns->ns_id, 0, dma_addr))
		goto free;

	lbaf = id->flbas & 0xf;
	ns->lba_shift = id->lbaf[lbaf].ds;

	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
 free:
	dma_free_coherent(&dev->pci_dev->dev, 4096, id, dma_addr);
	return 0;
}

M
Matthew Wilcox 已提交
1833 1834 1835
static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
K
Keith Busch 已提交
1836
	.compat_ioctl	= nvme_compat_ioctl,
1837 1838
	.open		= nvme_open,
	.release	= nvme_release,
K
Keith Busch 已提交
1839
	.getgeo		= nvme_getgeo,
K
Keith Busch 已提交
1840
	.revalidate_disk= nvme_revalidate_disk,
M
Matthew Wilcox 已提交
1841 1842
};

1843 1844
static int nvme_kthread(void *data)
{
1845
	struct nvme_dev *dev, *next;
1846 1847

	while (!kthread_should_stop()) {
1848
		set_current_state(TASK_INTERRUPTIBLE);
1849
		spin_lock(&dev_list_lock);
1850
		list_for_each_entry_safe(dev, next, &dev_list, node) {
1851
			int i;
1852 1853 1854 1855 1856 1857
			if (readl(&dev->bar->csts) & NVME_CSTS_CFS &&
							dev->initialized) {
				if (work_busy(&dev->reset_work))
					continue;
				list_del_init(&dev->node);
				dev_warn(&dev->pci_dev->dev,
M
Matias Bjørling 已提交
1858 1859
					"Failed status: %x, reset controller\n",
					readl(&dev->bar->csts));
T
Tejun Heo 已提交
1860
				dev->reset_workfn = nvme_reset_failed_dev;
1861 1862 1863
				queue_work(nvme_workq, &dev->reset_work);
				continue;
			}
1864
			for (i = 0; i < dev->queue_count; i++) {
M
Matias Bjørling 已提交
1865
				struct nvme_queue *nvmeq = dev->queues[i];
1866 1867
				if (!nvmeq)
					continue;
1868
				spin_lock_irq(&nvmeq->q_lock);
1869
				nvme_process_cq(nvmeq);
K
Keith Busch 已提交
1870 1871

				while ((i == 0) && (dev->event_limit > 0)) {
M
Matias Bjørling 已提交
1872
					if (nvme_submit_async_admin_req(dev))
K
Keith Busch 已提交
1873 1874 1875
						break;
					dev->event_limit--;
				}
1876 1877 1878 1879
				spin_unlock_irq(&nvmeq->q_lock);
			}
		}
		spin_unlock(&dev_list_lock);
1880
		schedule_timeout(round_jiffies_relative(HZ));
1881 1882 1883 1884
	}
	return 0;
}

1885 1886 1887 1888 1889 1890 1891 1892 1893 1894
static void nvme_config_discard(struct nvme_ns *ns)
{
	u32 logical_block_size = queue_logical_block_size(ns->queue);
	ns->queue->limits.discard_zeroes_data = 0;
	ns->queue->limits.discard_alignment = logical_block_size;
	ns->queue->limits.discard_granularity = logical_block_size;
	ns->queue->limits.max_discard_sectors = 0xffffffff;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}

1895
static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
M
Matthew Wilcox 已提交
1896 1897 1898 1899
			struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
M
Matias Bjørling 已提交
1900
	int node = dev_to_node(&dev->pci_dev->dev);
M
Matthew Wilcox 已提交
1901 1902 1903 1904 1905
	int lbaf;

	if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
		return NULL;

M
Matias Bjørling 已提交
1906
	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
M
Matthew Wilcox 已提交
1907 1908
	if (!ns)
		return NULL;
M
Matias Bjørling 已提交
1909
	ns->queue = blk_mq_init_queue(&dev->tagset);
1910
	if (IS_ERR(ns->queue))
M
Matthew Wilcox 已提交
1911
		goto out_free_ns;
M
Matthew Wilcox 已提交
1912 1913
	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
M
Matias Bjørling 已提交
1914
	queue_flag_set_unlocked(QUEUE_FLAG_SG_GAPS, ns->queue);
M
Matthew Wilcox 已提交
1915 1916 1917
	ns->dev = dev;
	ns->queue->queuedata = ns;

M
Matias Bjørling 已提交
1918
	disk = alloc_disk_node(0, node);
M
Matthew Wilcox 已提交
1919 1920
	if (!disk)
		goto out_free_queue;
M
Matias Bjørling 已提交
1921

1922
	ns->ns_id = nsid;
M
Matthew Wilcox 已提交
1923 1924 1925
	ns->disk = disk;
	lbaf = id->flbas & 0xf;
	ns->lba_shift = id->lbaf[lbaf].ds;
1926
	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
1927
	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
K
Keith Busch 已提交
1928 1929
	if (dev->max_hw_sectors)
		blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
M
Matias Bjørling 已提交
1930 1931
	if (dev->stripe_size)
		blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9);
1932 1933
	if (dev->vwc & NVME_CTRL_VWC_PRESENT)
		blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
M
Matthew Wilcox 已提交
1934 1935

	disk->major = nvme_major;
1936
	disk->first_minor = 0;
M
Matthew Wilcox 已提交
1937 1938 1939
	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
1940
	disk->driverfs_dev = &dev->pci_dev->dev;
1941
	disk->flags = GENHD_FL_EXT_DEVT;
1942
	sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
M
Matthew Wilcox 已提交
1943 1944
	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

1945 1946 1947
	if (dev->oncs & NVME_CTRL_ONCS_DSM)
		nvme_config_discard(ns);

M
Matthew Wilcox 已提交
1948 1949 1950 1951 1952 1953 1954 1955 1956
	return ns;

 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_free_ns:
	kfree(ns);
	return NULL;
}

K
Keith Busch 已提交
1957 1958
static void nvme_create_io_queues(struct nvme_dev *dev)
{
M
Matias Bjørling 已提交
1959
	unsigned i;
K
Keith Busch 已提交
1960

M
Matias Bjørling 已提交
1961
	for (i = dev->queue_count; i <= dev->max_qid; i++)
K
Keith Busch 已提交
1962
		if (!nvme_alloc_queue(dev, i, dev->q_depth))
K
Keith Busch 已提交
1963 1964
			break;

M
Matias Bjørling 已提交
1965 1966
	for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
		if (nvme_create_queue(dev->queues[i], i))
K
Keith Busch 已提交
1967 1968 1969
			break;
}

1970
static int set_queue_count(struct nvme_dev *dev, int count)
M
Matthew Wilcox 已提交
1971 1972 1973
{
	int status;
	u32 result;
1974
	u32 q_count = (count - 1) | ((count - 1) << 16);
M
Matthew Wilcox 已提交
1975

1976
	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
1977
								&result);
M
Matthew Wilcox 已提交
1978 1979 1980 1981 1982
	if (status < 0)
		return status;
	if (status > 0) {
		dev_err(&dev->pci_dev->dev, "Could not set queue count (%d)\n",
									status);
1983
		return 0;
M
Matthew Wilcox 已提交
1984
	}
M
Matthew Wilcox 已提交
1985 1986 1987
	return min(result & 0xffff, result >> 16) + 1;
}

K
Keith Busch 已提交
1988 1989
static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
{
1990
	return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
K
Keith Busch 已提交
1991 1992
}

1993
static int nvme_setup_io_queues(struct nvme_dev *dev)
M
Matthew Wilcox 已提交
1994
{
M
Matias Bjørling 已提交
1995
	struct nvme_queue *adminq = dev->queues[0];
R
Ramachandra Rao Gajula 已提交
1996
	struct pci_dev *pdev = dev->pci_dev;
K
Keith Busch 已提交
1997
	int result, i, vecs, nr_io_queues, size;
M
Matthew Wilcox 已提交
1998

K
Keith Busch 已提交
1999
	nr_io_queues = num_possible_cpus();
2000
	result = set_queue_count(dev, nr_io_queues);
2001
	if (result <= 0)
M
Matthew Wilcox 已提交
2002
		return result;
2003 2004
	if (result < nr_io_queues)
		nr_io_queues = result;
M
Matthew Wilcox 已提交
2005

K
Keith Busch 已提交
2006 2007
	size = db_bar_size(dev, nr_io_queues);
	if (size > 8192) {
2008
		iounmap(dev->bar);
K
Keith Busch 已提交
2009 2010 2011 2012 2013 2014 2015 2016
		do {
			dev->bar = ioremap(pci_resource_start(pdev, 0), size);
			if (dev->bar)
				break;
			if (!--nr_io_queues)
				return -ENOMEM;
			size = db_bar_size(dev, nr_io_queues);
		} while (1);
2017
		dev->dbs = ((void __iomem *)dev->bar) + 4096;
2018
		adminq->q_db = dev->dbs;
2019 2020
	}

K
Keith Busch 已提交
2021
	/* Deregister the admin queue's interrupt */
2022
	free_irq(dev->entry[0].vector, adminq);
K
Keith Busch 已提交
2023

2024 2025 2026 2027 2028 2029 2030
	/*
	 * If we enable msix early due to not intx, disable it again before
	 * setting up the full range we need.
	 */
	if (!pdev->irq)
		pci_disable_msix(pdev);

2031
	for (i = 0; i < nr_io_queues; i++)
M
Matthew Wilcox 已提交
2032
		dev->entry[i].entry = i;
2033 2034 2035 2036 2037 2038 2039 2040
	vecs = pci_enable_msix_range(pdev, dev->entry, 1, nr_io_queues);
	if (vecs < 0) {
		vecs = pci_enable_msi_range(pdev, 1, min(nr_io_queues, 32));
		if (vecs < 0) {
			vecs = 1;
		} else {
			for (i = 0; i < vecs; i++)
				dev->entry[i].vector = i + pdev->irq;
R
Ramachandra Rao Gajula 已提交
2041 2042 2043
		}
	}

2044 2045 2046 2047 2048 2049 2050
	/*
	 * Should investigate if there's a performance win from allocating
	 * more queues than interrupt vectors; it might allow the submission
	 * path to scale better, even if the receive path is limited by the
	 * number of interrupts.
	 */
	nr_io_queues = vecs;
K
Keith Busch 已提交
2051
	dev->max_qid = nr_io_queues;
2052

2053
	result = queue_request_irq(dev, adminq, adminq->irqname);
M
Matias Bjørling 已提交
2054
	if (result)
2055
		goto free_queues;
M
Matthew Wilcox 已提交
2056

2057
	/* Free previously allocated queues that are no longer usable */
K
Keith Busch 已提交
2058
	nvme_free_queues(dev, nr_io_queues + 1);
M
Matias Bjørling 已提交
2059
	nvme_create_io_queues(dev);
M
Matthew Wilcox 已提交
2060

2061
	return 0;
M
Matthew Wilcox 已提交
2062

2063
 free_queues:
2064
	nvme_free_queues(dev, 1);
2065
	return result;
M
Matthew Wilcox 已提交
2066 2067
}

2068 2069 2070 2071 2072 2073
/*
 * Return: error value if an error occurred setting up the queues or calling
 * Identify Device.  0 if these succeeded, even if adding some of the
 * namespaces failed.  At the moment, these failures are silent.  TBD which
 * failures should be reported.
 */
2074
static int nvme_dev_add(struct nvme_dev *dev)
M
Matthew Wilcox 已提交
2075
{
2076
	struct pci_dev *pdev = dev->pci_dev;
2077 2078
	int res;
	unsigned nn, i;
2079
	struct nvme_ns *ns;
2080
	struct nvme_id_ctrl *ctrl;
2081 2082
	struct nvme_id_ns *id_ns;
	void *mem;
M
Matthew Wilcox 已提交
2083
	dma_addr_t dma_addr;
2084
	int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
M
Matthew Wilcox 已提交
2085

2086
	mem = dma_alloc_coherent(&pdev->dev, 8192, &dma_addr, GFP_KERNEL);
2087 2088
	if (!mem)
		return -ENOMEM;
M
Matthew Wilcox 已提交
2089

2090
	res = nvme_identify(dev, 0, 1, dma_addr);
M
Matthew Wilcox 已提交
2091
	if (res) {
M
Matthew Wilcox 已提交
2092
		dev_err(&pdev->dev, "Identify Controller failed (%d)\n", res);
M
Matthew Wilcox 已提交
2093
		res = -EIO;
2094
		goto out;
M
Matthew Wilcox 已提交
2095 2096
	}

2097
	ctrl = mem;
2098
	nn = le32_to_cpup(&ctrl->nn);
2099
	dev->oncs = le16_to_cpup(&ctrl->oncs);
K
Keith Busch 已提交
2100
	dev->abort_limit = ctrl->acl + 1;
2101
	dev->vwc = ctrl->vwc;
K
Keith Busch 已提交
2102
	dev->event_limit = min(ctrl->aerl + 1, 8);
2103 2104 2105
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
2106
	if (ctrl->mdts)
K
Keith Busch 已提交
2107
		dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
2108
	if ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
M
Matias Bjørling 已提交
2109 2110 2111
			(pdev->device == 0x0953) && ctrl->vs[3]) {
		unsigned int max_hw_sectors;

2112
		dev->stripe_size = 1 << (ctrl->vs[3] + shift);
M
Matias Bjørling 已提交
2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132
		max_hw_sectors = dev->stripe_size >> (shift - 9);
		if (dev->max_hw_sectors) {
			dev->max_hw_sectors = min(max_hw_sectors,
							dev->max_hw_sectors);
		} else
			dev->max_hw_sectors = max_hw_sectors;
	}

	dev->tagset.ops = &nvme_mq_ops;
	dev->tagset.nr_hw_queues = dev->online_queues - 1;
	dev->tagset.timeout = NVME_IO_TIMEOUT;
	dev->tagset.numa_node = dev_to_node(&dev->pci_dev->dev);
	dev->tagset.queue_depth =
				min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
	dev->tagset.cmd_size = sizeof(struct nvme_cmd_info);
	dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
	dev->tagset.driver_data = dev;

	if (blk_mq_alloc_tag_set(&dev->tagset))
		goto out;
M
Matthew Wilcox 已提交
2133

2134
	id_ns = mem;
M
Matthew Wilcox 已提交
2135
	for (i = 1; i <= nn; i++) {
2136
		res = nvme_identify(dev, i, 0, dma_addr);
M
Matthew Wilcox 已提交
2137 2138 2139
		if (res)
			continue;

2140
		if (id_ns->ncap == 0)
M
Matthew Wilcox 已提交
2141 2142
			continue;

2143
		res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
2144
							dma_addr + 4096, NULL);
M
Matthew Wilcox 已提交
2145
		if (res)
2146
			memset(mem + 4096, 0, 4096);
M
Matthew Wilcox 已提交
2147

2148
		ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
M
Matthew Wilcox 已提交
2149 2150 2151 2152 2153
		if (ns)
			list_add_tail(&ns->list, &dev->namespaces);
	}
	list_for_each_entry(ns, &dev->namespaces, list)
		add_disk(ns->disk);
2154
	res = 0;
M
Matthew Wilcox 已提交
2155

2156
 out:
2157
	dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
M
Matthew Wilcox 已提交
2158 2159 2160
	return res;
}

2161 2162
static int nvme_dev_map(struct nvme_dev *dev)
{
K
Keith Busch 已提交
2163
	u64 cap;
2164 2165 2166 2167 2168 2169 2170 2171 2172
	int bars, result = -ENOMEM;
	struct pci_dev *pdev = dev->pci_dev;

	if (pci_enable_device_mem(pdev))
		return result;

	dev->entry[0].vector = pdev->irq;
	pci_set_master(pdev);
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
2173 2174 2175
	if (!bars)
		goto disable_pci;

2176 2177 2178
	if (pci_request_selected_regions(pdev, bars, "nvme"))
		goto disable_pci;

2179 2180 2181
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		goto disable;
2182 2183 2184 2185

	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
	if (!dev->bar)
		goto disable;
2186

K
Keith Busch 已提交
2187 2188 2189 2190
	if (readl(&dev->bar->csts) == -1) {
		result = -ENODEV;
		goto unmap;
	}
2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201

	/*
	 * Some devices don't advertse INTx interrupts, pre-enable a single
	 * MSIX vec for setup. We'll adjust this later.
	 */
	if (!pdev->irq) {
		result = pci_enable_msix(pdev, dev->entry, 1);
		if (result < 0)
			goto unmap;
	}

K
Keith Busch 已提交
2202 2203 2204
	cap = readq(&dev->bar->cap);
	dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
	dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
2205 2206 2207 2208
	dev->dbs = ((void __iomem *)dev->bar) + 4096;

	return 0;

K
Keith Busch 已提交
2209 2210 2211
 unmap:
	iounmap(dev->bar);
	dev->bar = NULL;
2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228
 disable:
	pci_release_regions(pdev);
 disable_pci:
	pci_disable_device(pdev);
	return result;
}

static void nvme_dev_unmap(struct nvme_dev *dev)
{
	if (dev->pci_dev->msi_enabled)
		pci_disable_msi(dev->pci_dev);
	else if (dev->pci_dev->msix_enabled)
		pci_disable_msix(dev->pci_dev);

	if (dev->bar) {
		iounmap(dev->bar);
		dev->bar = NULL;
K
Keith Busch 已提交
2229
		pci_release_regions(dev->pci_dev);
2230 2231 2232 2233 2234 2235
	}

	if (pci_is_enabled(dev->pci_dev))
		pci_disable_device(dev->pci_dev);
}

K
Keith Busch 已提交
2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296
struct nvme_delq_ctx {
	struct task_struct *waiter;
	struct kthread_worker *worker;
	atomic_t refcount;
};

static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
{
	dq->waiter = current;
	mb();

	for (;;) {
		set_current_state(TASK_KILLABLE);
		if (!atomic_read(&dq->refcount))
			break;
		if (!schedule_timeout(ADMIN_TIMEOUT) ||
					fatal_signal_pending(current)) {
			set_current_state(TASK_RUNNING);

			nvme_disable_ctrl(dev, readq(&dev->bar->cap));
			nvme_disable_queue(dev, 0);

			send_sig(SIGKILL, dq->worker->task, 1);
			flush_kthread_worker(dq->worker);
			return;
		}
	}
	set_current_state(TASK_RUNNING);
}

static void nvme_put_dq(struct nvme_delq_ctx *dq)
{
	atomic_dec(&dq->refcount);
	if (dq->waiter)
		wake_up_process(dq->waiter);
}

static struct nvme_delq_ctx *nvme_get_dq(struct nvme_delq_ctx *dq)
{
	atomic_inc(&dq->refcount);
	return dq;
}

static void nvme_del_queue_end(struct nvme_queue *nvmeq)
{
	struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;

	nvme_clear_queue(nvmeq);
	nvme_put_dq(dq);
}

static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode,
						kthread_work_func_t fn)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(nvmeq->qid);

	init_kthread_work(&nvmeq->cmdinfo.work, fn);
M
Matias Bjørling 已提交
2297 2298
	return nvme_submit_admin_async_cmd(nvmeq->dev, &c, &nvmeq->cmdinfo,
								ADMIN_TIMEOUT);
K
Keith Busch 已提交
2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360
}

static void nvme_del_cq_work_handler(struct kthread_work *work)
{
	struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
							cmdinfo.work);
	nvme_del_queue_end(nvmeq);
}

static int nvme_delete_cq(struct nvme_queue *nvmeq)
{
	return adapter_async_del_queue(nvmeq, nvme_admin_delete_cq,
						nvme_del_cq_work_handler);
}

static void nvme_del_sq_work_handler(struct kthread_work *work)
{
	struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
							cmdinfo.work);
	int status = nvmeq->cmdinfo.status;

	if (!status)
		status = nvme_delete_cq(nvmeq);
	if (status)
		nvme_del_queue_end(nvmeq);
}

static int nvme_delete_sq(struct nvme_queue *nvmeq)
{
	return adapter_async_del_queue(nvmeq, nvme_admin_delete_sq,
						nvme_del_sq_work_handler);
}

static void nvme_del_queue_start(struct kthread_work *work)
{
	struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
							cmdinfo.work);
	allow_signal(SIGKILL);
	if (nvme_delete_sq(nvmeq))
		nvme_del_queue_end(nvmeq);
}

static void nvme_disable_io_queues(struct nvme_dev *dev)
{
	int i;
	DEFINE_KTHREAD_WORKER_ONSTACK(worker);
	struct nvme_delq_ctx dq;
	struct task_struct *kworker_task = kthread_run(kthread_worker_fn,
					&worker, "nvme%d", dev->instance);

	if (IS_ERR(kworker_task)) {
		dev_err(&dev->pci_dev->dev,
			"Failed to create queue del task\n");
		for (i = dev->queue_count - 1; i > 0; i--)
			nvme_disable_queue(dev, i);
		return;
	}

	dq.waiter = NULL;
	atomic_set(&dq.refcount, 0);
	dq.worker = &worker;
	for (i = dev->queue_count - 1; i > 0; i--) {
M
Matias Bjørling 已提交
2361
		struct nvme_queue *nvmeq = dev->queues[i];
K
Keith Busch 已提交
2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373

		if (nvme_suspend_queue(nvmeq))
			continue;
		nvmeq->cmdinfo.ctx = nvme_get_dq(&dq);
		nvmeq->cmdinfo.worker = dq.worker;
		init_kthread_work(&nvmeq->cmdinfo.work, nvme_del_queue_start);
		queue_kthread_work(dq.worker, &nvmeq->cmdinfo.work);
	}
	nvme_wait_dq(&dq, dev);
	kthread_stop(kworker_task);
}

2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393
/*
* Remove the node from the device list and check
* for whether or not we need to stop the nvme_thread.
*/
static void nvme_dev_list_remove(struct nvme_dev *dev)
{
	struct task_struct *tmp = NULL;

	spin_lock(&dev_list_lock);
	list_del_init(&dev->node);
	if (list_empty(&dev_list) && !IS_ERR_OR_NULL(nvme_thread)) {
		tmp = nvme_thread;
		nvme_thread = NULL;
	}
	spin_unlock(&dev_list_lock);

	if (tmp)
		kthread_stop(tmp);
}

2394
static void nvme_dev_shutdown(struct nvme_dev *dev)
M
Matthew Wilcox 已提交
2395
{
2396
	int i;
2397
	u32 csts = -1;
2398

2399
	dev->initialized = 0;
2400
	nvme_dev_list_remove(dev);
2401

2402 2403 2404
	if (dev->bar)
		csts = readl(&dev->bar->csts);
	if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
K
Keith Busch 已提交
2405
		for (i = dev->queue_count - 1; i >= 0; i--) {
M
Matias Bjørling 已提交
2406
			struct nvme_queue *nvmeq = dev->queues[i];
K
Keith Busch 已提交
2407 2408 2409 2410 2411
			nvme_suspend_queue(nvmeq);
			nvme_clear_queue(nvmeq);
		}
	} else {
		nvme_disable_io_queues(dev);
K
Keith Busch 已提交
2412
		nvme_shutdown_ctrl(dev);
K
Keith Busch 已提交
2413 2414
		nvme_disable_queue(dev, 0);
	}
2415 2416 2417
	nvme_dev_unmap(dev);
}

M
Matias Bjørling 已提交
2418 2419 2420 2421 2422 2423
static void nvme_dev_remove_admin(struct nvme_dev *dev)
{
	if (dev->admin_q && !blk_queue_dying(dev->admin_q))
		blk_cleanup_queue(dev->admin_q);
}

2424 2425
static void nvme_dev_remove(struct nvme_dev *dev)
{
2426
	struct nvme_ns *ns;
2427

2428 2429 2430 2431 2432
	list_for_each_entry(ns, &dev->namespaces, list) {
		if (ns->disk->flags & GENHD_FL_UP)
			del_gendisk(ns->disk);
		if (!blk_queue_dying(ns->queue))
			blk_cleanup_queue(ns->queue);
M
Matthew Wilcox 已提交
2433 2434 2435
	}
}

M
Matthew Wilcox 已提交
2436 2437 2438 2439 2440 2441 2442 2443
static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
	struct device *dmadev = &dev->pci_dev->dev;
	dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
						PAGE_SIZE, PAGE_SIZE, 0);
	if (!dev->prp_page_pool)
		return -ENOMEM;

2444 2445 2446 2447 2448 2449 2450
	/* Optimisation for I/Os between 4k and 128k */
	dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
						256, 256, 0);
	if (!dev->prp_small_pool) {
		dma_pool_destroy(dev->prp_page_pool);
		return -ENOMEM;
	}
M
Matthew Wilcox 已提交
2451 2452 2453 2454 2455 2456
	return 0;
}

static void nvme_release_prp_pools(struct nvme_dev *dev)
{
	dma_pool_destroy(dev->prp_page_pool);
2457
	dma_pool_destroy(dev->prp_small_pool);
M
Matthew Wilcox 已提交
2458 2459
}

2460 2461 2462
static DEFINE_IDA(nvme_instance_ida);

static int nvme_set_instance(struct nvme_dev *dev)
M
Matthew Wilcox 已提交
2463
{
2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479
	int instance, error;

	do {
		if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
			return -ENODEV;

		spin_lock(&dev_list_lock);
		error = ida_get_new(&nvme_instance_ida, &instance);
		spin_unlock(&dev_list_lock);
	} while (error == -EAGAIN);

	if (error)
		return -ENODEV;

	dev->instance = instance;
	return 0;
M
Matthew Wilcox 已提交
2480 2481 2482 2483
}

static void nvme_release_instance(struct nvme_dev *dev)
{
2484 2485 2486
	spin_lock(&dev_list_lock);
	ida_remove(&nvme_instance_ida, dev->instance);
	spin_unlock(&dev_list_lock);
M
Matthew Wilcox 已提交
2487 2488
}

2489 2490 2491 2492 2493 2494
static void nvme_free_namespaces(struct nvme_dev *dev)
{
	struct nvme_ns *ns, *next;

	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
2495 2496 2497 2498 2499

		spin_lock(&dev_list_lock);
		ns->disk->private_data = NULL;
		spin_unlock(&dev_list_lock);

2500 2501 2502 2503 2504
		put_disk(ns->disk);
		kfree(ns);
	}
}

2505 2506 2507
static void nvme_free_dev(struct kref *kref)
{
	struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
2508

K
Keith Busch 已提交
2509
	pci_dev_put(dev->pci_dev);
2510
	nvme_free_namespaces(dev);
2511
	nvme_release_instance(dev);
M
Matias Bjørling 已提交
2512
	blk_mq_free_tag_set(&dev->tagset);
2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
}

static int nvme_dev_open(struct inode *inode, struct file *f)
{
	struct nvme_dev *dev = container_of(f->private_data, struct nvme_dev,
								miscdev);
	kref_get(&dev->kref);
	f->private_data = dev;
	return 0;
}

static int nvme_dev_release(struct inode *inode, struct file *f)
{
	struct nvme_dev *dev = f->private_data;
	kref_put(&dev->kref, nvme_free_dev);
	return 0;
}

static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	struct nvme_dev *dev = f->private_data;
M
Matias Bjørling 已提交
2537 2538
	struct nvme_ns *ns;

2539 2540
	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
M
Matias Bjørling 已提交
2541
		return nvme_user_cmd(dev, NULL, (void __user *)arg);
2542
	case NVME_IOCTL_IO_CMD:
M
Matias Bjørling 已提交
2543 2544 2545 2546
		if (list_empty(&dev->namespaces))
			return -ENOTTY;
		ns = list_first_entry(&dev->namespaces, struct nvme_ns, list);
		return nvme_user_cmd(dev, ns, (void __user *)arg);
2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559
	default:
		return -ENOTTY;
	}
}

static const struct file_operations nvme_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_dev_open,
	.release	= nvme_dev_release,
	.unlocked_ioctl	= nvme_dev_ioctl,
	.compat_ioctl	= nvme_dev_ioctl,
};

M
Matias Bjørling 已提交
2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575
static void nvme_set_irq_hints(struct nvme_dev *dev)
{
	struct nvme_queue *nvmeq;
	int i;

	for (i = 0; i < dev->online_queues; i++) {
		nvmeq = dev->queues[i];

		if (!nvmeq->hctx)
			continue;

		irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
							nvmeq->hctx->cpumask);
	}
}

2576 2577 2578
static int nvme_dev_start(struct nvme_dev *dev)
{
	int result;
2579
	bool start_thread = false;
2580 2581 2582 2583 2584 2585 2586 2587 2588 2589

	result = nvme_dev_map(dev);
	if (result)
		return result;

	result = nvme_configure_admin_queue(dev);
	if (result)
		goto unmap;

	spin_lock(&dev_list_lock);
2590 2591 2592 2593
	if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) {
		start_thread = true;
		nvme_thread = NULL;
	}
2594 2595 2596
	list_add(&dev->node, &dev_list);
	spin_unlock(&dev_list_lock);

2597 2598
	if (start_thread) {
		nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
2599
		wake_up_all(&nvme_kthread_wait);
2600 2601 2602 2603 2604 2605 2606
	} else
		wait_event_killable(nvme_kthread_wait, nvme_thread);

	if (IS_ERR_OR_NULL(nvme_thread)) {
		result = nvme_thread ? PTR_ERR(nvme_thread) : -EINTR;
		goto disable;
	}
M
Matias Bjørling 已提交
2607 2608

	nvme_init_queue(dev->queues[0], 0);
2609

2610
	result = nvme_setup_io_queues(dev);
2611
	if (result)
2612 2613
		goto disable;

M
Matias Bjørling 已提交
2614 2615
	nvme_set_irq_hints(dev);

2616
	return result;
2617 2618

 disable:
2619
	nvme_disable_queue(dev, 0);
2620
	nvme_dev_list_remove(dev);
2621 2622 2623 2624 2625
 unmap:
	nvme_dev_unmap(dev);
	return result;
}

K
Keith Busch 已提交
2626 2627 2628 2629 2630 2631
static int nvme_remove_dead_ctrl(void *arg)
{
	struct nvme_dev *dev = (struct nvme_dev *)arg;
	struct pci_dev *pdev = dev->pci_dev;

	if (pci_get_drvdata(pdev))
2632
		pci_stop_and_remove_bus_device_locked(pdev);
K
Keith Busch 已提交
2633 2634 2635 2636 2637 2638 2639 2640
	kref_put(&dev->kref, nvme_free_dev);
	return 0;
}

static void nvme_remove_disks(struct work_struct *ws)
{
	struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);

2641
	nvme_free_queues(dev, 1);
2642
	nvme_dev_remove(dev);
K
Keith Busch 已提交
2643 2644 2645 2646 2647 2648 2649
}

static int nvme_dev_resume(struct nvme_dev *dev)
{
	int ret;

	ret = nvme_dev_start(dev);
2650
	if (ret)
K
Keith Busch 已提交
2651
		return ret;
2652
	if (dev->online_queues < 2) {
K
Keith Busch 已提交
2653
		spin_lock(&dev_list_lock);
T
Tejun Heo 已提交
2654
		dev->reset_workfn = nvme_remove_disks;
K
Keith Busch 已提交
2655 2656 2657
		queue_work(nvme_workq, &dev->reset_work);
		spin_unlock(&dev_list_lock);
	}
2658
	dev->initialized = 1;
K
Keith Busch 已提交
2659 2660 2661 2662 2663 2664 2665
	return 0;
}

static void nvme_dev_reset(struct nvme_dev *dev)
{
	nvme_dev_shutdown(dev);
	if (nvme_dev_resume(dev)) {
M
Matias Bjørling 已提交
2666
		dev_warn(&dev->pci_dev->dev, "Device failed to resume\n");
K
Keith Busch 已提交
2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682
		kref_get(&dev->kref);
		if (IS_ERR(kthread_run(nvme_remove_dead_ctrl, dev, "nvme%d",
							dev->instance))) {
			dev_err(&dev->pci_dev->dev,
				"Failed to start controller remove task\n");
			kref_put(&dev->kref, nvme_free_dev);
		}
	}
}

static void nvme_reset_failed_dev(struct work_struct *ws)
{
	struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
	nvme_dev_reset(dev);
}

T
Tejun Heo 已提交
2683 2684 2685 2686 2687 2688
static void nvme_reset_workfn(struct work_struct *work)
{
	struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
	dev->reset_workfn(work);
}

2689
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
M
Matthew Wilcox 已提交
2690
{
M
Matias Bjørling 已提交
2691
	int node, result = -ENOMEM;
M
Matthew Wilcox 已提交
2692 2693
	struct nvme_dev *dev;

M
Matias Bjørling 已提交
2694 2695 2696 2697 2698
	node = dev_to_node(&pdev->dev);
	if (node == NUMA_NO_NODE)
		set_dev_node(&pdev->dev, 0);

	dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
M
Matthew Wilcox 已提交
2699 2700
	if (!dev)
		return -ENOMEM;
M
Matias Bjørling 已提交
2701 2702
	dev->entry = kzalloc_node(num_possible_cpus() * sizeof(*dev->entry),
							GFP_KERNEL, node);
M
Matthew Wilcox 已提交
2703 2704
	if (!dev->entry)
		goto free;
M
Matias Bjørling 已提交
2705 2706
	dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *),
							GFP_KERNEL, node);
M
Matthew Wilcox 已提交
2707 2708 2709 2710
	if (!dev->queues)
		goto free;

	INIT_LIST_HEAD(&dev->namespaces);
T
Tejun Heo 已提交
2711 2712
	dev->reset_workfn = nvme_reset_failed_dev;
	INIT_WORK(&dev->reset_work, nvme_reset_workfn);
K
Keith Busch 已提交
2713
	dev->pci_dev = pci_dev_get(pdev);
K
Keith Busch 已提交
2714
	pci_set_drvdata(pdev, dev);
2715 2716
	result = nvme_set_instance(dev);
	if (result)
K
Keith Busch 已提交
2717
		goto put_pci;
M
Matthew Wilcox 已提交
2718

M
Matthew Wilcox 已提交
2719 2720
	result = nvme_setup_prp_pools(dev);
	if (result)
2721
		goto release;
M
Matthew Wilcox 已提交
2722

2723
	kref_init(&dev->kref);
2724
	result = nvme_dev_start(dev);
2725
	if (result)
2726
		goto release_pools;
M
Matthew Wilcox 已提交
2727

2728 2729
	if (dev->online_queues > 1)
		result = nvme_dev_add(dev);
2730
	if (result)
2731
		goto shutdown;
2732

2733 2734 2735 2736 2737 2738 2739 2740 2741
	scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance);
	dev->miscdev.minor = MISC_DYNAMIC_MINOR;
	dev->miscdev.parent = &pdev->dev;
	dev->miscdev.name = dev->name;
	dev->miscdev.fops = &nvme_dev_fops;
	result = misc_register(&dev->miscdev);
	if (result)
		goto remove;

M
Matias Bjørling 已提交
2742 2743
	nvme_set_irq_hints(dev);

2744
	dev->initialized = 1;
M
Matthew Wilcox 已提交
2745 2746
	return 0;

2747 2748
 remove:
	nvme_dev_remove(dev);
M
Matias Bjørling 已提交
2749
	nvme_dev_remove_admin(dev);
2750
	nvme_free_namespaces(dev);
2751 2752
 shutdown:
	nvme_dev_shutdown(dev);
2753
 release_pools:
2754
	nvme_free_queues(dev, 0);
M
Matthew Wilcox 已提交
2755
	nvme_release_prp_pools(dev);
2756 2757
 release:
	nvme_release_instance(dev);
K
Keith Busch 已提交
2758 2759
 put_pci:
	pci_dev_put(dev->pci_dev);
M
Matthew Wilcox 已提交
2760 2761 2762 2763 2764 2765 2766
 free:
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
	return result;
}

2767 2768
static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
{
K
Keith Busch 已提交
2769
	struct nvme_dev *dev = pci_get_drvdata(pdev);
2770

K
Keith Busch 已提交
2771 2772 2773 2774
	if (prepare)
		nvme_dev_shutdown(dev);
	else
		nvme_dev_resume(dev);
2775 2776
}

2777 2778 2779 2780 2781 2782
static void nvme_shutdown(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	nvme_dev_shutdown(dev);
}

2783
static void nvme_remove(struct pci_dev *pdev)
M
Matthew Wilcox 已提交
2784 2785
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
K
Keith Busch 已提交
2786 2787 2788 2789 2790 2791 2792

	spin_lock(&dev_list_lock);
	list_del_init(&dev->node);
	spin_unlock(&dev_list_lock);

	pci_set_drvdata(pdev, NULL);
	flush_work(&dev->reset_work);
2793
	misc_deregister(&dev->miscdev);
M
Matias Bjørling 已提交
2794
	nvme_dev_remove(dev);
K
Keith Busch 已提交
2795
	nvme_dev_shutdown(dev);
M
Matias Bjørling 已提交
2796
	nvme_dev_remove_admin(dev);
2797
	nvme_free_queues(dev, 0);
M
Matias Bjørling 已提交
2798
	nvme_free_admin_tags(dev);
K
Keith Busch 已提交
2799
	nvme_release_prp_pools(dev);
2800
	kref_put(&dev->kref, nvme_free_dev);
M
Matthew Wilcox 已提交
2801 2802 2803 2804 2805 2806 2807 2808
}

/* These functions are yet to be implemented */
#define nvme_error_detected NULL
#define nvme_dump_registers NULL
#define nvme_link_reset NULL
#define nvme_slot_reset NULL
#define nvme_error_resume NULL
2809

2810
#ifdef CONFIG_PM_SLEEP
2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824
static int nvme_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct nvme_dev *ndev = pci_get_drvdata(pdev);

	nvme_dev_shutdown(ndev);
	return 0;
}

static int nvme_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct nvme_dev *ndev = pci_get_drvdata(pdev);

K
Keith Busch 已提交
2825
	if (nvme_dev_resume(ndev) && !work_busy(&ndev->reset_work)) {
T
Tejun Heo 已提交
2826
		ndev->reset_workfn = nvme_reset_failed_dev;
K
Keith Busch 已提交
2827 2828 2829
		queue_work(nvme_workq, &ndev->reset_work);
	}
	return 0;
2830
}
2831
#endif
2832 2833

static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
M
Matthew Wilcox 已提交
2834

2835
static const struct pci_error_handlers nvme_err_handler = {
M
Matthew Wilcox 已提交
2836 2837 2838 2839 2840
	.error_detected	= nvme_error_detected,
	.mmio_enabled	= nvme_dump_registers,
	.link_reset	= nvme_link_reset,
	.slot_reset	= nvme_slot_reset,
	.resume		= nvme_error_resume,
2841
	.reset_notify	= nvme_reset_notify,
M
Matthew Wilcox 已提交
2842 2843 2844 2845 2846
};

/* Move to pci_ids.h later */
#define PCI_CLASS_STORAGE_EXPRESS	0x010802

2847
static const struct pci_device_id nvme_id_table[] = {
M
Matthew Wilcox 已提交
2848 2849 2850 2851 2852 2853 2854 2855 2856
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);

static struct pci_driver nvme_driver = {
	.name		= "nvme",
	.id_table	= nvme_id_table,
	.probe		= nvme_probe,
2857
	.remove		= nvme_remove,
2858
	.shutdown	= nvme_shutdown,
2859 2860 2861
	.driver		= {
		.pm	= &nvme_dev_pm_ops,
	},
M
Matthew Wilcox 已提交
2862 2863 2864 2865 2866
	.err_handler	= &nvme_err_handler,
};

static int __init nvme_init(void)
{
2867
	int result;
2868

2869
	init_waitqueue_head(&nvme_kthread_wait);
M
Matthew Wilcox 已提交
2870

K
Keith Busch 已提交
2871 2872
	nvme_workq = create_singlethread_workqueue("nvme");
	if (!nvme_workq)
2873
		return -ENOMEM;
K
Keith Busch 已提交
2874

2875 2876
	result = register_blkdev(nvme_major, "nvme");
	if (result < 0)
K
Keith Busch 已提交
2877
		goto kill_workq;
2878
	else if (result > 0)
2879
		nvme_major = result;
M
Matthew Wilcox 已提交
2880

2881 2882
	result = pci_register_driver(&nvme_driver);
	if (result)
M
Matias Bjørling 已提交
2883
		goto unregister_blkdev;
2884
	return 0;
M
Matthew Wilcox 已提交
2885

2886
 unregister_blkdev:
M
Matthew Wilcox 已提交
2887
	unregister_blkdev(nvme_major, "nvme");
K
Keith Busch 已提交
2888 2889
 kill_workq:
	destroy_workqueue(nvme_workq);
M
Matthew Wilcox 已提交
2890 2891 2892 2893 2894 2895
	return result;
}

static void __exit nvme_exit(void)
{
	pci_unregister_driver(&nvme_driver);
2896
	unregister_hotcpu_notifier(&nvme_nb);
M
Matthew Wilcox 已提交
2897
	unregister_blkdev(nvme_major, "nvme");
K
Keith Busch 已提交
2898
	destroy_workqueue(nvme_workq);
2899
	BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
2900
	_nvme_check_size();
M
Matthew Wilcox 已提交
2901 2902 2903 2904
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
2905
MODULE_VERSION("1.0");
M
Matthew Wilcox 已提交
2906 2907
module_init(nvme_init);
module_exit(nvme_exit);