/*
 * NVM Express device driver
 * Copyright (c) 2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/nvme.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <scsi/sg.h>
#include <asm-generic/io-64-nonatomic-lo-hi.h>

#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
#define ADMIN_TIMEOUT	(60 * HZ)

static int nvme_major;
module_param(nvme_major, int, 0);

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;
static struct workqueue_struct *nvme_workq;

static void nvme_reset_failed_dev(struct work_struct *ws);

struct async_cmd_info {
	struct kthread_work work;
	struct kthread_worker *worker;
	u32 result;
	int status;
	void *ctx;
};

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct rcu_head r_head;
	struct device *q_dmadev;
	struct nvme_dev *dev;
	char irqname[24];	/* nvme4294967295-65535\0 */
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	volatile struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	wait_queue_head_t sq_full;
	wait_queue_t sq_cong_wait;
	struct bio_list sq_cong;
	u32 __iomem *q_db;
	u16 q_depth;
	u16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 qid;
	u8 cq_phase;
	u8 cqe_seen;
	u8 q_suspended;
	struct async_cmd_info cmdinfo;
	unsigned long cmdid_data[];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
}

typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
						struct nvme_completion *);

struct nvme_cmd_info {
	nvme_completion_fn fn;
	void *ctx;
	unsigned long timeout;
	int aborted;
};

static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
{
	return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
}

static unsigned nvme_queue_extra(int depth)
{
	return DIV_ROUND_UP(depth, 8) + (depth * sizeof(struct nvme_cmd_info));
}
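
/*
 * Layout note: cmdid_data at the end of struct nvme_queue is a bitmap of
 * q_depth command IDs (rounded up to whole longs), followed immediately
 * by the nvme_cmd_info array that nvme_cmd_info() indexes into.  For the
 * default depth of 1024 that is 128 bytes of bitmap plus 1024 info
 * entries, which is the "extra" space nvme_queue_extra() reserves.
 */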

/**
 * alloc_cmdid() - Allocate a Command ID
 * @nvmeq: The queue that will be used for this command
 * @ctx: A pointer that will be passed to the handler
 * @handler: The function to call on completion
 * @timeout: Timeout for the command, in jiffies
 *
 * Allocate a Command ID for a queue.  The data passed in will
 * be passed to the completion handler.  The handler and context are
 * stored in the per-queue nvme_cmd_info array, indexed by the Command
 * ID, and the ID doubles as an index into the cmdid_data bitmap of
 * in-flight commands.
 *
 * May be called with local interrupts disabled and the q_lock held,
 * or with interrupts enabled and no locks held.
 */
static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
				nvme_completion_fn handler, unsigned timeout)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	int cmdid;

	do {
		cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
		if (cmdid >= depth)
			return -EBUSY;
	} while (test_and_set_bit(cmdid, nvmeq->cmdid_data));

	info[cmdid].fn = handler;
	info[cmdid].ctx = ctx;
	info[cmdid].timeout = jiffies + timeout;
	info[cmdid].aborted = 0;
	return cmdid;
}

static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
				nvme_completion_fn handler, unsigned timeout)
{
	int cmdid;
	wait_event_killable(nvmeq->sq_full,
		(cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
	return (cmdid < 0) ? -EINTR : cmdid;
}
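
/*
 * Typical use, as in nvme_submit_bio_queue() below: allocate an ID with
 * the completion callback and its context, then store the ID in the
 * command before posting it:
 *
 *	cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
 */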

/* Special values must be less than 0x1000 */
#define CMD_CTX_BASE		((void *)POISON_POINTER_DELTA)
#define CMD_CTX_CANCELLED	(0x30C + CMD_CTX_BASE)
#define CMD_CTX_COMPLETED	(0x310 + CMD_CTX_BASE)
#define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)
#define CMD_CTX_FLUSH		(0x318 + CMD_CTX_BASE)
#define CMD_CTX_ABORT		(0x31C + CMD_CTX_BASE)

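/*
 * These sentinels live in the kernel's poison-pointer range, so they can
 * never collide with a real ctx allocation.  free_cmdid() and
 * cancel_cmdid() park finished slots on one of them, and
 * special_completion() recognises them instead of dereferencing.
 */
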
static void special_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	if (ctx == CMD_CTX_CANCELLED)
		return;
	if (ctx == CMD_CTX_FLUSH)
		return;
	if (ctx == CMD_CTX_ABORT) {
		++dev->abort_limit;
		return;
	}
	if (ctx == CMD_CTX_COMPLETED) {
		dev_warn(&dev->pci_dev->dev,
				"completed id %d twice on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}
	if (ctx == CMD_CTX_INVALID) {
		dev_warn(&dev->pci_dev->dev,
				"invalid id %d completed on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}

	dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
}

static void async_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	struct async_cmd_info *cmdinfo = ctx;
	cmdinfo->result = le32_to_cpup(&cqe->result);
	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
	queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
}

/*
 * Called with local interrupts disabled and the q_lock held.  May not sleep.
 */
static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
						nvme_completion_fn *fn)
{
	void *ctx;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);

	if (cmdid >= nvmeq->q_depth) {
		*fn = special_completion;
		return CMD_CTX_INVALID;
	}
	if (fn)
		*fn = info[cmdid].fn;
	ctx = info[cmdid].ctx;
	info[cmdid].fn = special_completion;
	info[cmdid].ctx = CMD_CTX_COMPLETED;
	clear_bit(cmdid, nvmeq->cmdid_data);
	wake_up(&nvmeq->sq_full);
	return ctx;
}

static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
						nvme_completion_fn *fn)
{
	void *ctx;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	if (fn)
		*fn = info[cmdid].fn;
	ctx = info[cmdid].ctx;
	info[cmdid].fn = special_completion;
	info[cmdid].ctx = CMD_CTX_CANCELLED;
	return ctx;
}

static struct nvme_queue *raw_nvmeq(struct nvme_dev *dev, int qid)
{
	return rcu_dereference_raw(dev->queues[qid]);
}

static struct nvme_queue *get_nvmeq(struct nvme_dev *dev) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(dev->queues[get_cpu() + 1]);
}

static void put_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
{
	put_cpu();
	rcu_read_unlock();
}

static struct nvme_queue *lock_nvmeq(struct nvme_dev *dev, int q_idx)
							__acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(dev->queues[q_idx]);
}

static void unlock_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
{
	rcu_read_unlock();
}
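
/*
 * The queues[] array is RCU-protected: readers enter an RCU read-side
 * critical section via get_nvmeq()/lock_nvmeq() while teardown publishes
 * NULL and frees the queue through call_rcu() (see nvme_free_queues()
 * below), so a queue cannot disappear underneath a submitter.
 */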

/**
 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	unsigned long flags;
	u16 tail;
	spin_lock_irqsave(&nvmeq->q_lock, flags);
	if (nvmeq->q_suspended) {
		spin_unlock_irqrestore(&nvmeq->q_lock, flags);
		return -EBUSY;
	}
	tail = nvmeq->sq_tail;
	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
	spin_unlock_irqrestore(&nvmeq->q_lock, flags);

	return 0;
}
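
/*
 * Ordering note (relying on Linux's MMIO semantics rather than anything
 * this driver states explicitly): writel() is ordered after the
 * preceding memcpy() into the DMA-coherent submission queue, so the
 * controller sees the complete command before the tail doorbell moves.
 */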

static __le64 **iod_list(struct nvme_iod *iod)
{
	return ((void *)iod) + iod->offset;
}

/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_npages(unsigned size)
{
	unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE);
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}

static struct nvme_iod *
nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
{
	struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
				sizeof(__le64 *) * nvme_npages(nbytes) +
				sizeof(struct scatterlist) * nseg, gfp);

	if (iod) {
		iod->offset = offsetof(struct nvme_iod, sg[nseg]);
		iod->npages = -1;
		iod->length = nbytes;
		iod->nents = 0;
		iod->start_time = jiffies;
	}

	return iod;
}

void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
{
	const int last_prp = PAGE_SIZE / 8 - 1;
	int i;
	__le64 **list = iod_list(iod);
	dma_addr_t prp_dma = iod->first_dma;

	if (iod->npages == 0)
		dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
	for (i = 0; i < iod->npages; i++) {
		__le64 *prp_list = list[i];
		dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
		dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
		prp_dma = next_prp_dma;
	}
	kfree(iod);
}

static void nvme_start_io_acct(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	const int rw = bio_data_dir(bio);
	int cpu = part_stat_lock();
	part_round_stats(cpu, &disk->part0);
	part_stat_inc(cpu, &disk->part0, ios[rw]);
	part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio));
	part_inc_in_flight(&disk->part0, rw);
	part_stat_unlock();
}

static void nvme_end_io_acct(struct bio *bio, unsigned long start_time)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	const int rw = bio_data_dir(bio);
	unsigned long duration = jiffies - start_time;
	int cpu = part_stat_lock();
	part_stat_add(cpu, &disk->part0, ticks[rw], duration);
	part_round_stats(cpu, &disk->part0);
	part_dec_in_flight(&disk->part0, rw);
	part_stat_unlock();
}

static void bio_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	struct nvme_iod *iod = ctx;
	struct bio *bio = iod->private;
	u16 status = le16_to_cpup(&cqe->status) >> 1;

	if (iod->nents) {
		dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		nvme_end_io_acct(bio, iod->start_time);
	}
	nvme_free_iod(dev, iod);
	if (status)
		bio_endio(bio, -EIO);
	else
		bio_endio(bio, 0);
}

/* length is in bytes.  gfp flags indicate whether we may sleep. */
int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
			struct nvme_iod *iod, int total_len, gfp_t gfp)
{
	struct dma_pool *pool;
	int length = total_len;
	struct scatterlist *sg = iod->sg;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = offset_in_page(dma_addr);
	__le64 *prp_list;
	__le64 **list = iod_list(iod);
	dma_addr_t prp_dma;
	int nprps, i;

	cmd->prp1 = cpu_to_le64(dma_addr);
	length -= (PAGE_SIZE - offset);
	if (length <= 0)
		return total_len;

	dma_len -= (PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (PAGE_SIZE - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= PAGE_SIZE) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		return total_len;
	}

	nprps = DIV_ROUND_UP(length, PAGE_SIZE);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
	if (!prp_list) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		iod->npages = -1;
		return (total_len - length) + PAGE_SIZE;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;
	cmd->prp2 = cpu_to_le64(prp_dma);
	i = 0;
	for (;;) {
		if (i == PAGE_SIZE / 8) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
			if (!prp_list)
				return total_len - length;
			list[iod->npages++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= PAGE_SIZE;
		dma_addr += PAGE_SIZE;
		length -= PAGE_SIZE;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		BUG_ON(dma_len < 0);
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	return total_len;
}

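/*
 * Worked example for nvme_setup_prps(), assuming 4 KiB pages: a
 * page-aligned 8 KiB transfer needs two PRP entries, so prp1 and prp2
 * simply point at the two data pages and no list is allocated.  A 32 KiB
 * transfer needs 8 entries: prp1 covers the first page and prp2 points
 * at a PRP list holding the other 7 (from prp_small_pool, since
 * 7 <= 256 / 8).  When a list page fills up, its last slot is repurposed
 * to chain to the next list page, which is what the old_prp_list shuffle
 * above maintains.
 */
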
static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
				 int len)
{
	struct bio *split = bio_split(bio, len >> 9, GFP_ATOMIC, NULL);
	if (!split)
		return -ENOMEM;

	bio_chain(split, bio);

	if (bio_list_empty(&nvmeq->sq_cong))
		add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
	bio_list_add(&nvmeq->sq_cong, split);
	bio_list_add(&nvmeq->sq_cong, bio);

	return 0;
}

/* NVMe scatterlists require no holes in the virtual address */
#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2)	((vec2)->bv_offset || \
			(((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))

static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
	struct bio_vec bvec, bvprv;
	struct bvec_iter iter;
	struct scatterlist *sg = NULL;
	int length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size;
	int first = 1;

	if (nvmeq->dev->stripe_size)
		split_len = nvmeq->dev->stripe_size -
			((bio->bi_iter.bi_sector << 9) &
			 (nvmeq->dev->stripe_size - 1));

	sg_init_table(iod->sg, psegs);
	bio_for_each_segment(bvec, bio, iter) {
		if (!first && BIOVEC_PHYS_MERGEABLE(&bvprv, &bvec)) {
			sg->length += bvec.bv_len;
		} else {
			if (!first && BIOVEC_NOT_VIRT_MERGEABLE(&bvprv, &bvec))
				return nvme_split_and_submit(bio, nvmeq,
							     length);

			sg = sg ? sg + 1 : iod->sg;
			sg_set_page(sg, bvec.bv_page,
				    bvec.bv_len, bvec.bv_offset);
			nsegs++;
		}

		if (split_len - length < bvec.bv_len)
			return nvme_split_and_submit(bio, nvmeq, split_len);
		length += bvec.bv_len;
		bvprv = bvec;
		first = 0;
	}
	iod->nents = nsegs;
	sg_mark_end(sg);
	if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
		return -ENOMEM;

	BUG_ON(length != bio->bi_iter.bi_size);
	return length;
}

/*
 * We reuse the small pool to allocate the 16-byte range here as it is not
 * worth having a special pool for these or additional cases to handle freeing
 * the iod.
 */
static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
		struct bio *bio, struct nvme_iod *iod, int cmdid)
{
	struct nvme_dsm_range *range;
	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC,
							&iod->first_dma);
	if (!range)
		return -ENOMEM;

	iod_list(iod)[0] = (__le64 *)range;
	iod->npages = 0;

	range->cattr = cpu_to_le32(0);
	range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift);
	range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.command_id = cmdid;
	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
	cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
	cmnd->dsm.nr = 0;
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;
}

static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								int cmdid)
{
	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.command_id = cmdid;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;
}

int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
{
	int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
					special_completion, NVME_IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		return cmdid;

	return nvme_submit_flush(nvmeq, ns, cmdid);
}

/*
 * Called with local interrupts disabled and the q_lock held.  May not sleep.
 */
static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								struct bio *bio)
{
	struct nvme_command *cmnd;
	struct nvme_iod *iod;
	enum dma_data_direction dma_dir;
	int cmdid, length, result;
	u16 control;
	u32 dsmgmt;
	int psegs = bio_phys_segments(ns->queue, bio);

	if ((bio->bi_rw & REQ_FLUSH) && psegs) {
		result = nvme_submit_flush_data(nvmeq, ns);
		if (result)
			return result;
	}

	result = -ENOMEM;
	iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
	if (!iod)
		goto nomem;
	iod->private = bio;

	result = -EBUSY;
	cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		goto free_iod;

	if (bio->bi_rw & REQ_DISCARD) {
		result = nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
		if (result)
			goto free_cmdid;
		return result;
	}
	if ((bio->bi_rw & REQ_FLUSH) && !psegs)
		return nvme_submit_flush(nvmeq, ns, cmdid);

	control = 0;
	if (bio->bi_rw & REQ_FUA)
		control |= NVME_RW_FUA;
	if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	dsmgmt = 0;
	if (bio->bi_rw & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	if (bio_data_dir(bio)) {
		cmnd->rw.opcode = nvme_cmd_write;
		dma_dir = DMA_TO_DEVICE;
	} else {
		cmnd->rw.opcode = nvme_cmd_read;
		dma_dir = DMA_FROM_DEVICE;
	}

	result = nvme_map_bio(nvmeq, iod, bio, dma_dir, psegs);
	if (result <= 0)
		goto free_cmdid;
	length = result;

	cmnd->rw.command_id = cmdid;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
								GFP_ATOMIC);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
	cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);

	nvme_start_io_acct(bio);
	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;

 free_cmdid:
	free_cmdid(nvmeq, cmdid, NULL);
 free_iod:
	nvme_free_iod(nvmeq->dev, iod);
 nomem:
	return result;
}

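/*
 * Completion queue entries carry a phase tag in bit 0 of the status
 * field.  The controller inverts the tag each time it wraps the queue,
 * so an entry whose tag matches nvmeq->cq_phase is new and one that
 * doesn't is stale; this is how the loop below finds fresh completions
 * without a shared head pointer.
 */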
static int nvme_process_cq(struct nvme_queue *nvmeq)
{
	u16 head, phase;

	head = nvmeq->cq_head;
	phase = nvmeq->cq_phase;

	for (;;) {
		void *ctx;
		nvme_completion_fn fn;
		struct nvme_completion cqe = nvmeq->cqes[head];
		if ((le16_to_cpu(cqe.status) & 1) != phase)
			break;
		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}

		ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
		fn(nvmeq->dev, ctx, &cqe);
	}

	/* If the controller ignores the cq head doorbell and continuously
	 * writes to the queue, it is theoretically possible to wrap around
	 * the queue twice and mistakenly return IRQ_NONE.  Linux only
	 * requires that 0.1% of your interrupts are handled, so this isn't
	 * a big problem.
	 */
	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
		return 0;

	writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	nvmeq->cqe_seen = 1;
	return 1;
}

static void nvme_make_request(struct request_queue *q, struct bio *bio)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
	int result = -EBUSY;

	if (!nvmeq) {
		put_nvmeq(NULL);
		bio_endio(bio, -EIO);
		return;
	}

	spin_lock_irq(&nvmeq->q_lock);
	if (!nvmeq->q_suspended && bio_list_empty(&nvmeq->sq_cong))
		result = nvme_submit_bio_queue(nvmeq, ns, bio);
	if (unlikely(result)) {
		if (bio_list_empty(&nvmeq->sq_cong))
			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
		bio_list_add(&nvmeq->sq_cong, bio);
	}

	nvme_process_cq(nvmeq);
	spin_unlock_irq(&nvmeq->q_lock);
	put_nvmeq(nvmeq);
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	irqreturn_t result;
	struct nvme_queue *nvmeq = data;
	spin_lock(&nvmeq->q_lock);
	nvme_process_cq(nvmeq);
	result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE;
	nvmeq->cqe_seen = 0;
	spin_unlock(&nvmeq->q_lock);
	return result;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
	if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
		return IRQ_NONE;
	return IRQ_WAKE_THREAD;
}

static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
{
	spin_lock_irq(&nvmeq->q_lock);
	cancel_cmdid(nvmeq, cmdid, NULL);
	spin_unlock_irq(&nvmeq->q_lock);
}

struct sync_cmd_info {
	struct task_struct *task;
	u32 result;
	int status;
};

static void sync_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	struct sync_cmd_info *cmdinfo = ctx;
	cmdinfo->result = le32_to_cpup(&cqe->result);
	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
	wake_up_process(cmdinfo->task);
}

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
static int nvme_submit_sync_cmd(struct nvme_dev *dev, int q_idx,
						struct nvme_command *cmd,
						u32 *result, unsigned timeout)
{
	int cmdid, ret;
	struct sync_cmd_info cmdinfo;
	struct nvme_queue *nvmeq;

	nvmeq = lock_nvmeq(dev, q_idx);
	if (!nvmeq) {
		unlock_nvmeq(nvmeq);
		return -ENODEV;
	}

	cmdinfo.task = current;
	cmdinfo.status = -EINTR;

	cmdid = alloc_cmdid(nvmeq, &cmdinfo, sync_completion, timeout);
	if (cmdid < 0) {
		unlock_nvmeq(nvmeq);
		return cmdid;
	}
	cmd->common.command_id = cmdid;

	set_current_state(TASK_KILLABLE);
	ret = nvme_submit_cmd(nvmeq, cmd);
	if (ret) {
		free_cmdid(nvmeq, cmdid, NULL);
		unlock_nvmeq(nvmeq);
		set_current_state(TASK_RUNNING);
		return ret;
	}
	unlock_nvmeq(nvmeq);
	schedule_timeout(timeout);

	if (cmdinfo.status == -EINTR) {
		nvmeq = lock_nvmeq(dev, q_idx);
		if (nvmeq)
			nvme_abort_command(nvmeq, cmdid);
		unlock_nvmeq(nvmeq);
		return -EINTR;
	}

	if (result)
		*result = cmdinfo.result;

	return cmdinfo.status;
}

static int nvme_submit_async_cmd(struct nvme_queue *nvmeq,
			struct nvme_command *cmd,
			struct async_cmd_info *cmdinfo, unsigned timeout)
{
	int cmdid;

	cmdid = alloc_cmdid_killable(nvmeq, cmdinfo, async_completion, timeout);
	if (cmdid < 0)
		return cmdid;
	cmdinfo->status = -EINTR;
	cmd->common.command_id = cmdid;
	return nvme_submit_cmd(nvmeq, cmd);
}

int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
								u32 *result)
{
	return nvme_submit_sync_cmd(dev, 0, cmd, result, ADMIN_TIMEOUT);
}

int nvme_submit_io_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
								u32 *result)
{
	return nvme_submit_sync_cmd(dev, smp_processor_id() + 1, cmd, result,
							NVME_IO_TIMEOUT);
}

static int nvme_submit_admin_cmd_async(struct nvme_dev *dev,
		struct nvme_command *cmd, struct async_cmd_info *cmdinfo)
{
	return nvme_submit_async_cmd(raw_nvmeq(dev, 0), cmd, cmdinfo,
								ADMIN_TIMEOUT);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	int status;
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}
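
/*
 * Ordering note: a completion queue must exist before the submission
 * queue that posts to it is created (nvme_create_queue() below allocates
 * the CQ first), and teardown runs in the reverse order (see
 * nvme_disable_queue()).
 */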

int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
							dma_addr_t dma_addr)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.prp1 = cpu_to_le64(dma_addr);
	c.identify.cns = cpu_to_le32(cns);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
					dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	return nvme_submit_admin_cmd(dev, &c, result);
}

int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
					dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	return nvme_submit_admin_cmd(dev, &c, result);
}

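/*
 * Usage sketch (a pattern from elsewhere in this driver, not shown in
 * this excerpt): the number of I/O queues is negotiated by setting the
 * NVME_FEAT_NUM_QUEUES feature with the requested count in dword11 and
 * reading the controller's grant back from *result:
 *
 *	nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0, &result);
 */
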
/**
 * nvme_abort_cmd - Attempt aborting a command
 * @cmdid: Command id of a timed out IO
 * @nvmeq: The queue with timed out IO
 *
 * Schedule controller reset if the command was already aborted once before and
 * still hasn't been returned to the driver, or if this is the admin queue.
 */
static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
{
	int a_cmdid;
	struct nvme_command cmd;
	struct nvme_dev *dev = nvmeq->dev;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	struct nvme_queue *adminq;

	if (!nvmeq->qid || info[cmdid].aborted) {
		if (work_busy(&dev->reset_work))
			return;
		list_del_init(&dev->node);
		dev_warn(&dev->pci_dev->dev,
			"I/O %d QID %d timeout, reset controller\n", cmdid,
								nvmeq->qid);
		PREPARE_WORK(&dev->reset_work, nvme_reset_failed_dev);
		queue_work(nvme_workq, &dev->reset_work);
		return;
	}

	if (!dev->abort_limit)
		return;

	adminq = rcu_dereference(dev->queues[0]);
	a_cmdid = alloc_cmdid(adminq, CMD_CTX_ABORT, special_completion,
								ADMIN_TIMEOUT);
	if (a_cmdid < 0)
		return;

	memset(&cmd, 0, sizeof(cmd));
	cmd.abort.opcode = nvme_admin_abort_cmd;
	cmd.abort.cid = cmdid;
	cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
	cmd.abort.command_id = a_cmdid;

	--dev->abort_limit;
	info[cmdid].aborted = 1;
	info[cmdid].timeout = jiffies + ADMIN_TIMEOUT;

	dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", cmdid,
							nvmeq->qid);
	nvme_submit_cmd(adminq, &cmd);
}

/**
 * nvme_cancel_ios - Cancel outstanding I/Os
 * @nvmeq: The queue to cancel I/Os on
 * @timeout: True to only cancel I/Os which have timed out
 */
static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	unsigned long now = jiffies;
	int cmdid;

	for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
		void *ctx;
		nvme_completion_fn fn;
		static struct nvme_completion cqe = {
			.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
		};

		if (timeout && !time_after(now, info[cmdid].timeout))
			continue;
		if (info[cmdid].ctx == CMD_CTX_CANCELLED)
			continue;
		if (timeout && nvmeq->dev->initialized) {
			nvme_abort_cmd(cmdid, nvmeq);
			continue;
		}
		dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", cmdid,
								nvmeq->qid);
		ctx = cancel_cmdid(nvmeq, cmdid, &fn);
		fn(nvmeq->dev, ctx, &cqe);
	}
}

static void nvme_free_queue(struct rcu_head *r)
{
	struct nvme_queue *nvmeq = container_of(r, struct nvme_queue, r_head);

	spin_lock_irq(&nvmeq->q_lock);
	while (bio_list_peek(&nvmeq->sq_cong)) {
		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
		bio_endio(bio, -EIO);
	}
	spin_unlock_irq(&nvmeq->q_lock);

	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
}

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
	int i;

	for (i = num_possible_cpus(); i > dev->queue_count - 1; i--)
		rcu_assign_pointer(dev->queues[i], NULL);
	for (i = dev->queue_count - 1; i >= lowest; i--) {
		struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
		rcu_assign_pointer(dev->queues[i], NULL);
		call_rcu(&nvmeq->r_head, nvme_free_queue);
		dev->queue_count--;
	}
}

/**
 * nvme_suspend_queue - put queue into suspended state
 * @nvmeq: queue to suspend
 *
 * Returns 1 if already suspended, 0 otherwise.
 */
static int nvme_suspend_queue(struct nvme_queue *nvmeq)
{
	int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;

	spin_lock_irq(&nvmeq->q_lock);
	if (nvmeq->q_suspended) {
		spin_unlock_irq(&nvmeq->q_lock);
		return 1;
	}
	nvmeq->q_suspended = 1;
	spin_unlock_irq(&nvmeq->q_lock);

	irq_set_affinity_hint(vector, NULL);
	free_irq(vector, nvmeq);

	return 0;
}

static void nvme_clear_queue(struct nvme_queue *nvmeq)
{
	spin_lock_irq(&nvmeq->q_lock);
	nvme_process_cq(nvmeq);
	nvme_cancel_ios(nvmeq, false);
	spin_unlock_irq(&nvmeq->q_lock);
}

static void nvme_disable_queue(struct nvme_dev *dev, int qid)
{
	struct nvme_queue *nvmeq = raw_nvmeq(dev, qid);

	if (!nvmeq)
		return;
	if (nvme_suspend_queue(nvmeq))
		return;

	/* Don't tell the adapter to delete the admin queue.
	 * Don't tell a removed adapter to delete IO queues. */
	if (qid && readl(&dev->bar->csts) != -1) {
		adapter_delete_sq(dev, qid);
		adapter_delete_cq(dev, qid);
	}
	nvme_clear_queue(nvmeq);
}

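/*
 * Doorbell layout: each queue pair owns a submission queue tail doorbell
 * at dbs[qid * 2 * db_stride] and a completion queue head doorbell
 * db_stride u32s after it (db_stride comes from the CAP.DSTRD field).
 */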
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
							int depth, int vector)
{
	struct device *dmadev = &dev->pci_dev->dev;
	unsigned extra = nvme_queue_extra(depth);
	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
	if (!nvmeq)
		return NULL;

	nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
					&nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));

	nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
					&nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		goto free_cqdma;

	nvmeq->q_dmadev = dmadev;
	nvmeq->dev = dev;
	snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
			dev->instance, qid);
	spin_lock_init(&nvmeq->q_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	init_waitqueue_head(&nvmeq->sq_full);
	init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
	bio_list_init(&nvmeq->sq_cong);
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->q_depth = depth;
	nvmeq->cq_vector = vector;
	nvmeq->qid = qid;
	nvmeq->q_suspended = 1;
	dev->queue_count++;
	rcu_assign_pointer(dev->queues[qid], nvmeq);

	return nvmeq;

 free_cqdma:
	dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
							nvmeq->cq_dma_addr);
 free_nvmeq:
	kfree(nvmeq);
	return NULL;
}

static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
							const char *name)
{
	if (use_threaded_interrupts)
		return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
					nvme_irq_check, nvme_irq, IRQF_SHARED,
					name, nvmeq);
	return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
				IRQF_SHARED, name, nvmeq);
}

static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
	struct nvme_dev *dev = nvmeq->dev;
	unsigned extra = nvme_queue_extra(nvmeq->q_depth);

	nvmeq->sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset(nvmeq->cmdid_data, 0, extra);
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
	nvme_cancel_ios(nvmeq, false);
	nvmeq->q_suspended = 0;
}

static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
{
	struct nvme_dev *dev = nvmeq->dev;
	int result;

	result = adapter_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		return result;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
	if (result < 0)
		goto release_sq;

	spin_lock_irq(&nvmeq->q_lock);
	nvme_init_queue(nvmeq, qid);
	spin_unlock_irq(&nvmeq->q_lock);

	return result;

 release_sq:
	adapter_delete_sq(dev, qid);
 release_cq:
	adapter_delete_cq(dev, qid);
	return result;
}

static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled)
{
	unsigned long timeout;
	u32 bit = enabled ? NVME_CSTS_RDY : 0;

	timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;

	while ((readl(&dev->bar->csts) & NVME_CSTS_RDY) != bit) {
		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(&dev->pci_dev->dev,
				"Device not ready; aborting initialisation\n");
			return -ENODEV;
		}
	}

	return 0;
}

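/*
 * CAP.TO is expressed in 500 ms units, hence the (TO + 1) * HZ / 2
 * deadline computed in nvme_wait_ready() above.
 */
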
/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
static int nvme_disable_ctrl(struct nvme_dev *dev, u64 cap)
{
	u32 cc = readl(&dev->bar->cc);

	if (cc & NVME_CC_ENABLE)
		writel(cc & ~NVME_CC_ENABLE, &dev->bar->cc);
	return nvme_wait_ready(dev, cap, false);
}

static int nvme_enable_ctrl(struct nvme_dev *dev, u64 cap)
{
	return nvme_wait_ready(dev, cap, true);
}

static int nvme_shutdown_ctrl(struct nvme_dev *dev)
{
	unsigned long timeout;
	u32 cc;

	cc = (readl(&dev->bar->cc) & ~NVME_CC_SHN_MASK) | NVME_CC_SHN_NORMAL;
	writel(cc, &dev->bar->cc);

	timeout = 2 * HZ + jiffies;
	while ((readl(&dev->bar->csts) & NVME_CSTS_SHST_MASK) !=
							NVME_CSTS_SHST_CMPLT) {
		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(&dev->pci_dev->dev,
				"Device shutdown incomplete; abort shutdown\n");
			return -ENODEV;
		}
	}

	return 0;
}

static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	u64 cap = readq(&dev->bar->cap);
	struct nvme_queue *nvmeq;

	result = nvme_disable_ctrl(dev, cap);
	if (result < 0)
		return result;

	nvmeq = raw_nvmeq(dev, 0);
	if (!nvmeq) {
		nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
		if (!nvmeq)
			return -ENOMEM;
	}

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
	dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;

	writel(aqa, &dev->bar->aqa);
	writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
	writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
	writel(dev->ctrl_config, &dev->bar->cc);

	result = nvme_enable_ctrl(dev, cap);
	if (result)
		return result;

	result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
	if (result)
		return result;

	spin_lock_irq(&nvmeq->q_lock);
	nvme_init_queue(nvmeq, 0);
	spin_unlock_irq(&nvmeq->q_lock);
	return result;
}
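
/*
 * Register programming note for the admin queue setup above: AQA packs
 * the zero-based admin SQ and CQ sizes into its low and high 16 bits,
 * ASQ/ACQ take the DMA addresses of the two admin queues, and CC is
 * written last, so the enable bit is only set once the rest of the
 * configuration is in place.
 */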

struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
				unsigned long addr, unsigned length)
{
	int i, err, count, nents, offset;
	struct scatterlist *sg;
	struct page **pages;
	struct nvme_iod *iod;

	if (addr & 3)
		return ERR_PTR(-EINVAL);
	if (!length || length > INT_MAX - PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	offset = offset_in_page(addr);
	count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
	pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	err = get_user_pages_fast(addr, count, 1, pages);
	if (err < count) {
		count = err;
		err = -EFAULT;
		goto put_pages;
	}

	iod = nvme_alloc_iod(count, length, GFP_KERNEL);
	if (!iod) {
		/* the allocation can fail under memory pressure */
		err = -ENOMEM;
		goto put_pages;
	}
	sg = iod->sg;
	sg_init_table(sg, count);
	for (i = 0; i < count; i++) {
		sg_set_page(&sg[i], pages[i],
			    min_t(unsigned, length, PAGE_SIZE - offset),
			    offset);
		length -= (PAGE_SIZE - offset);
		offset = 0;
	}
	sg_mark_end(&sg[i - 1]);
	iod->nents = count;

	err = -ENOMEM;
	nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!nents)
		goto free_iod;

	kfree(pages);
	return iod;

 free_iod:
	kfree(iod);
 put_pages:
	for (i = 0; i < count; i++)
		put_page(pages[i]);
	kfree(pages);
	return ERR_PTR(err);
}

void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
			struct nvme_iod *iod)
{
	int i;

	dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

	for (i = 0; i < iod->nents; i++)
		put_page(sg_page(&iod->sg[i]));
}
1473
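/*
 * The two helpers above pair up: nvme_map_user_pages() pins the user
 * buffer with get_user_pages_fast() and maps it for DMA, and
 * nvme_unmap_user_pages() must be called afterwards to unmap it and
 * drop every page reference.
 */
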

M
Matthew Wilcox 已提交
1474 1475 1476 1477 1478
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_user_io io;
	struct nvme_command c;
1479 1480 1481 1482 1483
	unsigned length, meta_len;
	int status, i;
	struct nvme_iod *iod, *meta_iod = NULL;
	dma_addr_t meta_dma_addr;
	void *meta, *uninitialized_var(meta_mem);
M
Matthew Wilcox 已提交
1484 1485 1486

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
1487
	length = (io.nblocks + 1) << ns->lba_shift;
1488 1489 1490 1491
	meta_len = (io.nblocks + 1) * ns->ms;

	if (meta_len && ((io.metadata & 3) || !io.metadata))
		return -EINVAL;
1492 1493 1494 1495

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
M
Matthew Wilcox 已提交
1496
	case nvme_cmd_compare:
1497
		iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
M
Matthew Wilcox 已提交
1498
		break;
1499
	default:
M
Matthew Wilcox 已提交
1500
		return -EINVAL;
1501 1502
	}

1503 1504
	if (IS_ERR(iod))
		return PTR_ERR(iod);
M
Matthew Wilcox 已提交
1505 1506 1507 1508

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
1509
	c.rw.nsid = cpu_to_le32(ns->ns_id);
M
Matthew Wilcox 已提交
1510
	c.rw.slba = cpu_to_le64(io.slba);
1511
	c.rw.length = cpu_to_le16(io.nblocks);
M
Matthew Wilcox 已提交
1512
	c.rw.control = cpu_to_le16(io.control);
1513 1514 1515 1516
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);
1517 1518

	if (meta_len) {
K
Keith Busch 已提交
1519 1520
		meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata,
								meta_len);
1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549
		if (IS_ERR(meta_iod)) {
			status = PTR_ERR(meta_iod);
			meta_iod = NULL;
			goto unmap;
		}

		meta_mem = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
						&meta_dma_addr, GFP_KERNEL);
		if (!meta_mem) {
			status = -ENOMEM;
			goto unmap;
		}

		if (io.opcode & 1) {
			int meta_offset = 0;

			for (i = 0; i < meta_iod->nents; i++) {
				meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
						meta_iod->sg[i].offset;
				memcpy(meta_mem + meta_offset, meta,
						meta_iod->sg[i].length);
				kunmap_atomic(meta);
				meta_offset += meta_iod->sg[i].length;
			}
		}

		c.rw.metadata = cpu_to_le64(meta_dma_addr);
	}

1550
	length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);
M
Matthew Wilcox 已提交
1551

1552 1553 1554
	if (length != (io.nblocks + 1) << ns->lba_shift)
		status = -ENOMEM;
	else
1555
		status = nvme_submit_io_cmd(dev, &c, NULL);
M
Matthew Wilcox 已提交
1556

1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575
	if (meta_len) {
		if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
			int meta_offset = 0;

			for (i = 0; i < meta_iod->nents; i++) {
				meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
						meta_iod->sg[i].offset;
				memcpy(meta, meta_mem + meta_offset,
						meta_iod->sg[i].length);
				kunmap_atomic(meta);
				meta_offset += meta_iod->sg[i].length;
			}
		}

		dma_free_coherent(&dev->pci_dev->dev, meta_len, meta_mem,
								meta_dma_addr);
	}

 unmap:
1576
	nvme_unmap_user_pages(dev, io.opcode & 1, iod);
1577
	nvme_free_iod(dev, iod);
1578 1579 1580 1581 1582 1583

	if (meta_iod) {
		nvme_unmap_user_pages(dev, io.opcode & 1, meta_iod);
		nvme_free_iod(dev, meta_iod);
	}

M
Matthew Wilcox 已提交
1584 1585 1586
	return status;
}

static int nvme_user_admin_cmd(struct nvme_dev *dev,
					struct nvme_admin_cmd __user *ucmd)
{
	struct nvme_admin_cmd cmd;
	struct nvme_command c;
	int status, length;
	struct nvme_iod *uninitialized_var(iod);
	unsigned timeout;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	length = cmd.data_len;
	if (cmd.data_len) {
		iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr,
								length);
		if (IS_ERR(iod))
			return PTR_ERR(iod);
		length = nvme_setup_prps(dev, &c.common, iod, length,
								GFP_KERNEL);
	}

	timeout = cmd.timeout_ms ? msecs_to_jiffies(cmd.timeout_ms) :
								ADMIN_TIMEOUT;
	if (length != cmd.data_len)
		status = -ENOMEM;
	else
		status = nvme_submit_sync_cmd(dev, 0, &c, &cmd.result, timeout);

	if (cmd.data_len) {
		nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
		nvme_free_iod(dev, iod);
	}

	if ((status >= 0) && copy_to_user(&ucmd->result, &cmd.result,
							sizeof(cmd.result)))
		status = -EFAULT;

	return status;
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
							unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_admin_cmd(ns->dev, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
	case SG_GET_VERSION_NUM:
		return nvme_sg_get_version_num((void __user *)arg);
	case SG_IO:
		return nvme_sg_io(ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}

#ifdef CONFIG_COMPAT
static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
					unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case SG_IO:
		return nvme_sg_io32(ns, arg);
	}
	return nvme_ioctl(bdev, mode, cmd, arg);
}
#else
#define nvme_compat_ioctl	NULL
#endif

static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;
	struct nvme_dev *dev = ns->dev;

	kref_get(&dev->kref);
	return 0;
}

static void nvme_free_dev(struct kref *kref);

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	struct nvme_ns *ns = disk->private_data;
	struct nvme_dev *dev = ns->dev;

	kref_put(&dev->kref, nvme_free_dev);
}

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_compat_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
};

static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
{
	while (bio_list_peek(&nvmeq->sq_cong)) {
		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
		struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;

		if (bio_list_empty(&nvmeq->sq_cong))
			remove_wait_queue(&nvmeq->sq_full,
							&nvmeq->sq_cong_wait);
		if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
			if (bio_list_empty(&nvmeq->sq_cong))
				add_wait_queue(&nvmeq->sq_full,
							&nvmeq->sq_cong_wait);
			bio_list_add_head(&nvmeq->sq_cong, bio);
			break;
		}
	}
}

static int nvme_kthread(void *data)
{
	struct nvme_dev *dev, *next;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock(&dev_list_lock);
		list_for_each_entry_safe(dev, next, &dev_list, node) {
			int i;
			if (readl(&dev->bar->csts) & NVME_CSTS_CFS &&
							dev->initialized) {
				if (work_busy(&dev->reset_work))
					continue;
				list_del_init(&dev->node);
				dev_warn(&dev->pci_dev->dev,
					"Failed status, reset controller\n");
				PREPARE_WORK(&dev->reset_work,
							nvme_reset_failed_dev);
				queue_work(nvme_workq, &dev->reset_work);
				continue;
			}
			rcu_read_lock();
			for (i = 0; i < dev->queue_count; i++) {
				struct nvme_queue *nvmeq =
						rcu_dereference(dev->queues[i]);
				if (!nvmeq)
					continue;
				spin_lock_irq(&nvmeq->q_lock);
				if (nvmeq->q_suspended)
					goto unlock;
				nvme_process_cq(nvmeq);
				nvme_cancel_ios(nvmeq, true);
				nvme_resubmit_bios(nvmeq);
 unlock:
				spin_unlock_irq(&nvmeq->q_lock);
			}
			rcu_read_unlock();
		}
		spin_unlock(&dev_list_lock);
		schedule_timeout(round_jiffies_relative(HZ));
	}
	return 0;
}

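/*
 * Advertise DSM deallocate (discard) support to the block layer, with
 * single-logical-block granularity and alignment and an essentially
 * unlimited maximum (0xffffffff sectors).  discard_zeroes_data is
 * cleared, presumably because NVMe does not guarantee in general that
 * deallocated blocks read back as zeroes.
 */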
static void nvme_config_discard(struct nvme_ns *ns)
{
	u32 logical_block_size = queue_logical_block_size(ns->queue);
	ns->queue->limits.discard_zeroes_data = 0;
	ns->queue->limits.discard_alignment = logical_block_size;
	ns->queue->limits.discard_granularity = logical_block_size;
	ns->queue->limits.max_discard_sectors = 0xffffffff;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}

static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
			struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	int lbaf;

	if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
		return NULL;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;
	ns->queue = blk_alloc_queue(GFP_KERNEL);
	if (!ns->queue)
		goto out_free_ns;
	ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
	blk_queue_make_request(ns->queue, nvme_make_request);
	ns->dev = dev;
	ns->queue->queuedata = ns;

	disk = alloc_disk(0);
	if (!disk)
		goto out_free_queue;
	ns->ns_id = nsid;
	ns->disk = disk;
	lbaf = id->flbas & 0xf;
	ns->lba_shift = id->lbaf[lbaf].ds;
	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
	if (dev->max_hw_sectors)
		blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);

	disk->major = nvme_major;
	disk->first_minor = 0;
	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->driverfs_dev = &dev->pci_dev->dev;
	disk->flags = GENHD_FL_EXT_DEVT;
	sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	if (dev->oncs & NVME_CTRL_ONCS_DSM)
		nvme_config_discard(ns);

	return ns;

 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_free_ns:
	kfree(ns);
	return NULL;
}

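/*
 * The Number of Queues feature uses zero-based counts: the requested
 * submission queue count goes in the low 16 bits and the completion queue
 * count in the high 16 bits, so count = 4 encodes as 0x00030003.  The
 * controller replies in the same format with what it actually allocated,
 * and the usable number of I/O queue pairs is the smaller half plus one.
 */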
static int set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	u32 q_count = (count - 1) | ((count - 1) << 16);

	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
								&result);
	if (status)
		return status < 0 ? -EIO : -EBUSY;
	return min(result & 0xffff, result >> 16) + 1;
}

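/*
 * Bytes of BAR needed to cover the 4K register block plus one doorbell
 * pair (submission tail + completion head, 4 bytes each at the minimum
 * stride) for the admin queue and every I/O queue; e.g. with a stride of
 * one, 31 I/O queues need 4096 + 32 * 8 = 4352 bytes.
 */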
static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
{
	return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
}

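/*
 * Bring up the I/O queues: ask the controller how many it will grant,
 * grow the BAR mapping if their doorbells spill past the initial 8K,
 * allocate interrupt vectors (MSI-X first, then MSI), spread vector
 * affinity across online CPUs, and finally allocate and create one queue
 * per vector, aliasing any remaining possible CPUs onto existing queues.
 */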
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
	struct nvme_queue *adminq = raw_nvmeq(dev, 0);
	struct pci_dev *pdev = dev->pci_dev;
	int result, cpu, i, vecs, nr_io_queues, size, q_depth;

	nr_io_queues = num_online_cpus();
	result = set_queue_count(dev, nr_io_queues);
	if (result < 0)
		return result;
	if (result < nr_io_queues)
		nr_io_queues = result;

	size = db_bar_size(dev, nr_io_queues);
	if (size > 8192) {
		iounmap(dev->bar);
		do {
			dev->bar = ioremap(pci_resource_start(pdev, 0), size);
			if (dev->bar)
				break;
			if (!--nr_io_queues)
				return -ENOMEM;
			size = db_bar_size(dev, nr_io_queues);
		} while (1);
		dev->dbs = ((void __iomem *)dev->bar) + 4096;
		adminq->q_db = dev->dbs;
	}

	/* Deregister the admin queue's interrupt */
	free_irq(dev->entry[0].vector, adminq);

	vecs = nr_io_queues;
	for (i = 0; i < vecs; i++)
		dev->entry[i].entry = i;
	for (;;) {
		result = pci_enable_msix(pdev, dev->entry, vecs);
		if (result <= 0)
			break;
		vecs = result;
	}

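	/*
	 * MSI-X failed outright even after the loop above retried with the
	 * vector count pci_enable_msix() reported as available.  Fall back
	 * to plain MSI, which tops out at 32 vectors, and to a single
	 * vector if even that cannot be enabled.
	 */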
	if (result < 0) {
		vecs = nr_io_queues;
		if (vecs > 32)
			vecs = 32;
		for (;;) {
			result = pci_enable_msi_block(pdev, vecs);
			if (result == 0) {
				for (i = 0; i < vecs; i++)
					dev->entry[i].vector = i + pdev->irq;
				break;
			} else if (result < 0) {
				vecs = 1;
				break;
			}
			vecs = result;
		}
	}

	/*
	 * Should investigate if there's a performance win from allocating
	 * more queues than interrupt vectors; it might allow the submission
	 * path to scale better, even if the receive path is limited by the
	 * number of interrupts.
	 */
	nr_io_queues = vecs;

	result = queue_request_irq(dev, adminq, adminq->irqname);
	if (result) {
		adminq->q_suspended = 1;
		goto free_queues;
	}

	/* Free previously allocated queues that are no longer usable */
	nvme_free_queues(dev, nr_io_queues);

	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < nr_io_queues; i++) {
		irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
	}

	q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1,
								NVME_Q_DEPTH);
	for (i = dev->queue_count - 1; i < nr_io_queues; i++) {
		if (!nvme_alloc_queue(dev, i + 1, q_depth, i)) {
			result = -ENOMEM;
			goto free_queues;
		}
	}

	for (; i < num_possible_cpus(); i++) {
		int target = i % rounddown_pow_of_two(dev->queue_count - 1);
		rcu_assign_pointer(dev->queues[i + 1], dev->queues[target + 1]);
	}

	for (i = 1; i < dev->queue_count; i++) {
		result = nvme_create_queue(raw_nvmeq(dev, i), i);
		if (result) {
			for (--i; i > 0; i--)
				nvme_disable_queue(dev, i);
			goto free_queues;
		}
	}

	return 0;

 free_queues:
	nvme_free_queues(dev, 1);
	return result;
}

/*
 * Return: error value if an error occurred setting up the queues or calling
 * Identify Device.  0 if these succeeded, even if adding some of the
 * namespaces failed.  At the moment, these failures are silent.  TBD which
 * failures should be reported.
 */
static int nvme_dev_add(struct nvme_dev *dev)
{
	struct pci_dev *pdev = dev->pci_dev;
	int res;
	unsigned nn, i;
	struct nvme_ns *ns;
	struct nvme_id_ctrl *ctrl;
	struct nvme_id_ns *id_ns;
	void *mem;
	dma_addr_t dma_addr;
	int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;

	mem = dma_alloc_coherent(&pdev->dev, 8192, &dma_addr, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	res = nvme_identify(dev, 0, 1, dma_addr);
	if (res) {
		res = -EIO;
		goto out;
	}

	ctrl = mem;
	nn = le32_to_cpup(&ctrl->nn);
	dev->oncs = le16_to_cpup(&ctrl->oncs);
	dev->abort_limit = ctrl->acl + 1;
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
	if (ctrl->mdts)
		dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
	if ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
			(pdev->device == 0x0953) && ctrl->vs[3])
		dev->stripe_size = 1 << (ctrl->vs[3] + shift);

	id_ns = mem;
	for (i = 1; i <= nn; i++) {
		res = nvme_identify(dev, i, 0, dma_addr);
		if (res)
			continue;

		if (id_ns->ncap == 0)
			continue;

		res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
							dma_addr + 4096, NULL);
		if (res)
			memset(mem + 4096, 0, 4096);

		ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
		if (ns)
			list_add_tail(&ns->list, &dev->namespaces);
	}
	list_for_each_entry(ns, &dev->namespaces, list)
		add_disk(ns->disk);
	res = 0;

 out:
	dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
	return res;
}

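/*
 * Enable the PCI device and map the controller registers.  Only 8K is
 * mapped initially (registers plus admin doorbells); nvme_setup_io_queues()
 * grows the mapping later if the I/O doorbells need more room.  A CSTS
 * register reading as all ones is taken to mean the device is gone, and
 * the mapping fails with -ENODEV.
 */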
static int nvme_dev_map(struct nvme_dev *dev)
{
	int bars, result = -ENOMEM;
	struct pci_dev *pdev = dev->pci_dev;

	if (pci_enable_device_mem(pdev))
		return result;

	dev->entry[0].vector = pdev->irq;
	pci_set_master(pdev);
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (pci_request_selected_regions(pdev, bars, "nvme"))
		goto disable_pci;

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		goto disable;

	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
	if (!dev->bar)
		goto disable;
	if (readl(&dev->bar->csts) == -1) {
		result = -ENODEV;
		goto unmap;
	}
	dev->db_stride = 1 << NVME_CAP_STRIDE(readq(&dev->bar->cap));
	dev->dbs = ((void __iomem *)dev->bar) + 4096;

	return 0;

 unmap:
	iounmap(dev->bar);
	dev->bar = NULL;
 disable:
	pci_release_regions(pdev);
 disable_pci:
	pci_disable_device(pdev);
	return result;
}

static void nvme_dev_unmap(struct nvme_dev *dev)
{
	if (dev->pci_dev->msi_enabled)
		pci_disable_msi(dev->pci_dev);
	else if (dev->pci_dev->msix_enabled)
		pci_disable_msix(dev->pci_dev);

	if (dev->bar) {
		iounmap(dev->bar);
		dev->bar = NULL;
		pci_release_regions(dev->pci_dev);
	}

	if (pci_is_enabled(dev->pci_dev))
		pci_disable_device(dev->pci_dev);
}

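/*
 * Asynchronous I/O queue deletion, used at shutdown when the controller
 * is still responsive.  Each queue's delete-SQ/delete-CQ admin commands
 * are issued from a temporary kthread worker; the refcount in
 * nvme_delq_ctx tracks queues still in flight, and the waiter either
 * sleeps until it drops to zero or, on timeout or a fatal signal,
 * forcibly disables the controller and kills the worker.
 */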
struct nvme_delq_ctx {
	struct task_struct *waiter;
	struct kthread_worker *worker;
	atomic_t refcount;
};

static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
{
	dq->waiter = current;
	mb();

	for (;;) {
		set_current_state(TASK_KILLABLE);
		if (!atomic_read(&dq->refcount))
			break;
		if (!schedule_timeout(ADMIN_TIMEOUT) ||
					fatal_signal_pending(current)) {
			set_current_state(TASK_RUNNING);

			nvme_disable_ctrl(dev, readq(&dev->bar->cap));
			nvme_disable_queue(dev, 0);

			send_sig(SIGKILL, dq->worker->task, 1);
			flush_kthread_worker(dq->worker);
			return;
		}
	}
	set_current_state(TASK_RUNNING);
}

static void nvme_put_dq(struct nvme_delq_ctx *dq)
{
	atomic_dec(&dq->refcount);
	if (dq->waiter)
		wake_up_process(dq->waiter);
}

static struct nvme_delq_ctx *nvme_get_dq(struct nvme_delq_ctx *dq)
{
	atomic_inc(&dq->refcount);
	return dq;
}

static void nvme_del_queue_end(struct nvme_queue *nvmeq)
{
	struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;

	nvme_clear_queue(nvmeq);
	nvme_put_dq(dq);
}

static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode,
						kthread_work_func_t fn)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(nvmeq->qid);

	init_kthread_work(&nvmeq->cmdinfo.work, fn);
	return nvme_submit_admin_cmd_async(nvmeq->dev, &c, &nvmeq->cmdinfo);
}

static void nvme_del_cq_work_handler(struct kthread_work *work)
{
	struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
							cmdinfo.work);
	nvme_del_queue_end(nvmeq);
}

static int nvme_delete_cq(struct nvme_queue *nvmeq)
{
	return adapter_async_del_queue(nvmeq, nvme_admin_delete_cq,
						nvme_del_cq_work_handler);
}

static void nvme_del_sq_work_handler(struct kthread_work *work)
{
	struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
							cmdinfo.work);
	int status = nvmeq->cmdinfo.status;

	if (!status)
		status = nvme_delete_cq(nvmeq);
	if (status)
		nvme_del_queue_end(nvmeq);
}

static int nvme_delete_sq(struct nvme_queue *nvmeq)
{
	return adapter_async_del_queue(nvmeq, nvme_admin_delete_sq,
						nvme_del_sq_work_handler);
}

static void nvme_del_queue_start(struct kthread_work *work)
{
	struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
							cmdinfo.work);
	allow_signal(SIGKILL);
	if (nvme_delete_sq(nvmeq))
		nvme_del_queue_end(nvmeq);
}

static void nvme_disable_io_queues(struct nvme_dev *dev)
{
	int i;
	DEFINE_KTHREAD_WORKER_ONSTACK(worker);
	struct nvme_delq_ctx dq;
	struct task_struct *kworker_task = kthread_run(kthread_worker_fn,
					&worker, "nvme%d", dev->instance);

	if (IS_ERR(kworker_task)) {
		dev_err(&dev->pci_dev->dev,
			"Failed to create queue del task\n");
		for (i = dev->queue_count - 1; i > 0; i--)
			nvme_disable_queue(dev, i);
		return;
	}

	dq.waiter = NULL;
	atomic_set(&dq.refcount, 0);
	dq.worker = &worker;
	for (i = dev->queue_count - 1; i > 0; i--) {
		struct nvme_queue *nvmeq = raw_nvmeq(dev, i);

		if (nvme_suspend_queue(nvmeq))
			continue;
		nvmeq->cmdinfo.ctx = nvme_get_dq(&dq);
		nvmeq->cmdinfo.worker = dq.worker;
		init_kthread_work(&nvmeq->cmdinfo.work, nvme_del_queue_start);
		queue_kthread_work(dq.worker, &nvmeq->cmdinfo.work);
	}
	nvme_wait_dq(&dq, dev);
	kthread_stop(kworker_task);
}

static void nvme_dev_shutdown(struct nvme_dev *dev)
{
	int i;

	dev->initialized = 0;

	spin_lock(&dev_list_lock);
	list_del_init(&dev->node);
	spin_unlock(&dev_list_lock);

	if (!dev->bar || readl(&dev->bar->csts) == -1) {
		for (i = dev->queue_count - 1; i >= 0; i--) {
			struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
			nvme_suspend_queue(nvmeq);
			nvme_clear_queue(nvmeq);
		}
	} else {
		nvme_disable_io_queues(dev);
		nvme_shutdown_ctrl(dev);
		nvme_disable_queue(dev, 0);
	}
	nvme_dev_unmap(dev);
}

static void nvme_dev_remove(struct nvme_dev *dev)
{
	struct nvme_ns *ns;

	list_for_each_entry(ns, &dev->namespaces, list) {
		if (ns->disk->flags & GENHD_FL_UP)
			del_gendisk(ns->disk);
		if (!blk_queue_dying(ns->queue))
			blk_cleanup_queue(ns->queue);
	}
}

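/*
 * PRP list pools: a page-sized pool for worst-case transfers and a small
 * 256-byte pool for mid-sized I/O.  With 4K pages, a 128K request needs
 * at most 32 PRP entries of 8 bytes each, which is exactly 256 bytes.
 */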
static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
	struct device *dmadev = &dev->pci_dev->dev;
	dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
						PAGE_SIZE, PAGE_SIZE, 0);
	if (!dev->prp_page_pool)
		return -ENOMEM;

	/* Optimisation for I/Os between 4k and 128k */
	dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
						256, 256, 0);
	if (!dev->prp_small_pool) {
		dma_pool_destroy(dev->prp_page_pool);
		return -ENOMEM;
	}
	return 0;
}

static void nvme_release_prp_pools(struct nvme_dev *dev)
{
	dma_pool_destroy(dev->prp_page_pool);
	dma_pool_destroy(dev->prp_small_pool);
}

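/*
 * Controller instance numbers (the N in /dev/nvmeN and nvmeNnM) come from
 * an IDA.  ida_pre_get() preallocates memory outside dev_list_lock, and
 * the allocation is retried whenever ida_get_new() returns -EAGAIN.
 */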
static DEFINE_IDA(nvme_instance_ida);

static int nvme_set_instance(struct nvme_dev *dev)
{
	int instance, error;

	do {
		if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
			return -ENODEV;

		spin_lock(&dev_list_lock);
		error = ida_get_new(&nvme_instance_ida, &instance);
		spin_unlock(&dev_list_lock);
	} while (error == -EAGAIN);

	if (error)
		return -ENODEV;

	dev->instance = instance;
	return 0;
}

static void nvme_release_instance(struct nvme_dev *dev)
{
	spin_lock(&dev_list_lock);
	ida_remove(&nvme_instance_ida, dev->instance);
	spin_unlock(&dev_list_lock);
}

static void nvme_free_namespaces(struct nvme_dev *dev)
{
	struct nvme_ns *ns, *next;

	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		put_disk(ns->disk);
		kfree(ns);
	}
}

static void nvme_free_dev(struct kref *kref)
{
	struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);

	nvme_free_namespaces(dev);
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
}

static int nvme_dev_open(struct inode *inode, struct file *f)
{
	struct nvme_dev *dev = container_of(f->private_data, struct nvme_dev,
								miscdev);
	kref_get(&dev->kref);
	f->private_data = dev;
	return 0;
}

static int nvme_dev_release(struct inode *inode, struct file *f)
{
	struct nvme_dev *dev = f->private_data;
	kref_put(&dev->kref, nvme_free_dev);
	return 0;
}

static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	struct nvme_dev *dev = f->private_data;
	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_admin_cmd(dev, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}

static const struct file_operations nvme_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_dev_open,
	.release	= nvme_dev_release,
	.unlocked_ioctl	= nvme_dev_ioctl,
	.compat_ioctl	= nvme_dev_ioctl,
};

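/*
 * Common bring-up path shared by probe and resume: map the device, set up
 * the admin queue, join the polling thread's device list, then create the
 * I/O queues.  -EBUSY from queue setup is handled specially by callers:
 * the controller answered admin commands but provided no I/O queues, so
 * it is kept around in a management-only state.
 */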
static int nvme_dev_start(struct nvme_dev *dev)
{
	int result;

	result = nvme_dev_map(dev);
	if (result)
		return result;

	result = nvme_configure_admin_queue(dev);
	if (result)
		goto unmap;

	spin_lock(&dev_list_lock);
	list_add(&dev->node, &dev_list);
	spin_unlock(&dev_list_lock);

	result = nvme_setup_io_queues(dev);
	if (result && result != -EBUSY)
		goto disable;

	return result;

 disable:
	nvme_disable_queue(dev, 0);
	spin_lock(&dev_list_lock);
	list_del_init(&dev->node);
	spin_unlock(&dev_list_lock);
 unmap:
	nvme_dev_unmap(dev);
	return result;
}

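/*
 * Failure handling helpers: a reset that cannot resume the device hands
 * it to nvme_remove_dead_ctrl() on a separate kthread to unbind it from
 * the driver, while nvme_remove_disks() covers the -EBUSY resume case by
 * tearing down the namespaces (the management misc device is left
 * registered).
 */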
static int nvme_remove_dead_ctrl(void *arg)
{
	struct nvme_dev *dev = (struct nvme_dev *)arg;
	struct pci_dev *pdev = dev->pci_dev;

	if (pci_get_drvdata(pdev))
		pci_stop_and_remove_bus_device(pdev);
	kref_put(&dev->kref, nvme_free_dev);
	return 0;
}

static void nvme_remove_disks(struct work_struct *ws)
{
	struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);

	nvme_dev_remove(dev);
	nvme_free_queues(dev, 1);
}

static int nvme_dev_resume(struct nvme_dev *dev)
{
	int ret;

	ret = nvme_dev_start(dev);
	if (ret && ret != -EBUSY)
		return ret;
	if (ret == -EBUSY) {
		spin_lock(&dev_list_lock);
		PREPARE_WORK(&dev->reset_work, nvme_remove_disks);
		queue_work(nvme_workq, &dev->reset_work);
		spin_unlock(&dev_list_lock);
	}
	dev->initialized = 1;
	return 0;
}

static void nvme_dev_reset(struct nvme_dev *dev)
{
	nvme_dev_shutdown(dev);
	if (nvme_dev_resume(dev)) {
		dev_err(&dev->pci_dev->dev, "Device failed to resume\n");
		kref_get(&dev->kref);
		if (IS_ERR(kthread_run(nvme_remove_dead_ctrl, dev, "nvme%d",
							dev->instance))) {
			dev_err(&dev->pci_dev->dev,
				"Failed to start controller remove task\n");
			kref_put(&dev->kref, nvme_free_dev);
		}
	}
}

static void nvme_reset_failed_dev(struct work_struct *ws)
{
	struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
	nvme_dev_reset(dev);
}

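/*
 * Probe ordering matters: the reset work and drvdata are initialized
 * before nvme_dev_start() makes the device visible to the polling thread,
 * and the misc device is registered even when I/O queue setup returned
 * -EBUSY, so a degraded controller remains reachable via /dev/nvme%d for
 * admin commands.
 */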
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int result = -ENOMEM;
	struct nvme_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
								GFP_KERNEL);
	if (!dev->entry)
		goto free;
	dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
								GFP_KERNEL);
	if (!dev->queues)
		goto free;

	INIT_LIST_HEAD(&dev->namespaces);
	INIT_WORK(&dev->reset_work, nvme_reset_failed_dev);
	dev->pci_dev = pdev;
	pci_set_drvdata(pdev, dev);
	result = nvme_set_instance(dev);
	if (result)
		goto free;

	result = nvme_setup_prp_pools(dev);
	if (result)
		goto release;

	kref_init(&dev->kref);
	result = nvme_dev_start(dev);
	if (result) {
		if (result == -EBUSY)
			goto create_cdev;
		goto release_pools;
	}

	result = nvme_dev_add(dev);
	if (result)
		goto shutdown;

 create_cdev:
	scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance);
	dev->miscdev.minor = MISC_DYNAMIC_MINOR;
	dev->miscdev.parent = &pdev->dev;
	dev->miscdev.name = dev->name;
	dev->miscdev.fops = &nvme_dev_fops;
	result = misc_register(&dev->miscdev);
	if (result)
		goto remove;

	dev->initialized = 1;
	return 0;

 remove:
	nvme_dev_remove(dev);
	nvme_free_namespaces(dev);
 shutdown:
	nvme_dev_shutdown(dev);
 release_pools:
	nvme_free_queues(dev, 0);
	nvme_release_prp_pools(dev);
 release:
	nvme_release_instance(dev);
 free:
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
	return result;
}

static void nvme_shutdown(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	nvme_dev_shutdown(dev);
}

static void nvme_remove(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	spin_lock(&dev_list_lock);
	list_del_init(&dev->node);
	spin_unlock(&dev_list_lock);

	pci_set_drvdata(pdev, NULL);
	flush_work(&dev->reset_work);
	misc_deregister(&dev->miscdev);
	nvme_dev_remove(dev);
	nvme_dev_shutdown(dev);
	nvme_free_queues(dev, 0);
	rcu_barrier();
	nvme_release_instance(dev);
	nvme_release_prp_pools(dev);
	kref_put(&dev->kref, nvme_free_dev);
}

/* These functions are yet to be implemented */
#define nvme_error_detected NULL
#define nvme_dump_registers NULL
#define nvme_link_reset NULL
#define nvme_slot_reset NULL
#define nvme_error_resume NULL

#ifdef CONFIG_PM_SLEEP
static int nvme_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct nvme_dev *ndev = pci_get_drvdata(pdev);

	nvme_dev_shutdown(ndev);
	return 0;
}

static int nvme_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct nvme_dev *ndev = pci_get_drvdata(pdev);

	if (nvme_dev_resume(ndev) && !work_busy(&ndev->reset_work)) {
		PREPARE_WORK(&ndev->reset_work, nvme_reset_failed_dev);
		queue_work(nvme_workq, &ndev->reset_work);
	}
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);

static const struct pci_error_handlers nvme_err_handler = {
	.error_detected	= nvme_error_detected,
	.mmio_enabled	= nvme_dump_registers,
	.link_reset	= nvme_link_reset,
	.slot_reset	= nvme_slot_reset,
	.resume		= nvme_error_resume,
};

/* Move to pci_ids.h later */
#define PCI_CLASS_STORAGE_EXPRESS	0x010802

static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);

static struct pci_driver nvme_driver = {
	.name		= "nvme",
	.id_table	= nvme_id_table,
	.probe		= nvme_probe,
	.remove		= nvme_remove,
	.shutdown	= nvme_shutdown,
	.driver		= {
		.pm	= &nvme_dev_pm_ops,
	},
	.err_handler	= &nvme_err_handler,
};

static int __init nvme_init(void)
{
	int result;

	nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
	if (IS_ERR(nvme_thread))
		return PTR_ERR(nvme_thread);

	result = -ENOMEM;
	nvme_workq = create_singlethread_workqueue("nvme");
	if (!nvme_workq)
		goto kill_kthread;

	result = register_blkdev(nvme_major, "nvme");
	if (result < 0)
		goto kill_workq;
	else if (result > 0)
		nvme_major = result;

	result = pci_register_driver(&nvme_driver);
	if (result)
		goto unregister_blkdev;
	return 0;

 unregister_blkdev:
	unregister_blkdev(nvme_major, "nvme");
 kill_workq:
	destroy_workqueue(nvme_workq);
 kill_kthread:
	kthread_stop(nvme_thread);
	return result;
}

static void __exit nvme_exit(void)
{
	pci_unregister_driver(&nvme_driver);
	unregister_blkdev(nvme_major, "nvme");
	destroy_workqueue(nvme_workq);
	kthread_stop(nvme_thread);
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.8");
module_init(nvme_init);
module_exit(nvme_exit);