/*
 * NVM Express device driver
 * Copyright (c) 2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/nvme.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/version.h>

#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
#define NVME_MINORS 64
#define IO_TIMEOUT	(5 * HZ)
#define ADMIN_TIMEOUT	(60 * HZ)

static int nvme_major;
module_param(nvme_major, int, 0);

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct list_head node;
	struct nvme_queue **queues;
	u32 __iomem *dbs;
	struct pci_dev *pci_dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	int instance;
	int queue_count;
	u32 ctrl_config;
	struct msix_entry *entry;
	struct nvme_bar __iomem *bar;
	struct list_head namespaces;
	char serial[20];
	char model[40];
	char firmware_rev[8];
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_dev *dev;
	struct request_queue *queue;
	struct gendisk *disk;

	int ns_id;
	int lba_shift;
};

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct device *q_dmadev;
	struct nvme_dev *dev;
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	volatile struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	wait_queue_head_t sq_full;
	wait_queue_t sq_cong_wait;
	struct bio_list sq_cong;
	u32 __iomem *q_db;
	u16 q_depth;
	u16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 cq_phase;
	unsigned long cmdid_data[];
};

/*
 * Check that we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
}

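/*
 * Per-command bookkeeping lives at the tail of each nvme_queue: a
 * bitmap of q_depth command IDs followed by one nvme_cmd_info per ID.
 * nvme_cmd_info() returns the info array by skipping past the bitmap.
 */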
struct nvme_cmd_info {
	unsigned long ctx;
	unsigned long timeout;
};

static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
{
	return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
}

/**
 * alloc_cmdid() - Allocate a Command ID
 * @nvmeq: The queue that will be used for this command
 * @ctx: A pointer that will be passed to the handler
 * @handler: The ID of the handler to call
 *
 * Allocate a Command ID for a queue.  The data passed in will
 * be passed to the completion handler.  This is implemented by using
 * the bottom two bits of the ctx pointer to store the handler ID.
 * Passing in a pointer that's not 4-byte aligned will cause a BUG.
 * We can change this if it becomes a problem.
 *
 * May be called with local interrupts disabled and the q_lock held,
 * or with interrupts enabled and no locks held.
 */
static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx, int handler,
							unsigned timeout)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	int cmdid;

	BUG_ON((unsigned long)ctx & 3);

	do {
		cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
		if (cmdid >= depth)
			return -EBUSY;
	} while (test_and_set_bit(cmdid, nvmeq->cmdid_data));

	info[cmdid].ctx = (unsigned long)ctx | handler;
	info[cmdid].timeout = jiffies + timeout;
	return cmdid;
}

static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
						int handler, unsigned timeout)
{
	int cmdid;
	wait_event_killable(nvmeq->sq_full,
		(cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
	return (cmdid < 0) ? -EINTR : cmdid;
}

/*
 * If you need more than four handlers, you'll need to change how
 * alloc_cmdid and nvme_process_cq work.  Consider using a special
 * CMD_CTX value instead, if that works for your situation.
 */
enum {
	sync_completion_id = 0,
	bio_completion_id,
};

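/*
 * Example: alloc_cmdid(nvmeq, nbio, bio_completion_id, IO_TIMEOUT)
 * stores ((unsigned long)nbio | bio_completion_id) in the ctx slot;
 * nvme_process_cq() recovers the handler index from the low two bits
 * and the pointer from the rest.
 */
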
/* Special values must be a multiple of 4, and less than 0x1000 */
#define CMD_CTX_BASE		(POISON_POINTER_DELTA + sync_completion_id)
#define CMD_CTX_CANCELLED	(0x30C + CMD_CTX_BASE)
#define CMD_CTX_COMPLETED	(0x310 + CMD_CTX_BASE)
#define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)
#define CMD_CTX_FLUSH		(0x318 + CMD_CTX_BASE)

/*
 * Called with local interrupts disabled and the q_lock held.  May not sleep.
 */
static unsigned long free_cmdid(struct nvme_queue *nvmeq, int cmdid)
{
	unsigned long data;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);

	if (cmdid >= nvmeq->q_depth)
		return CMD_CTX_INVALID;
	data = info[cmdid].ctx;
	info[cmdid].ctx = CMD_CTX_COMPLETED;
	clear_bit(cmdid, nvmeq->cmdid_data);
	wake_up(&nvmeq->sq_full);
	return data;
}

static unsigned long cancel_cmdid(struct nvme_queue *nvmeq, int cmdid)
{
	unsigned long data;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	data = info[cmdid].ctx;
	info[cmdid].ctx = CMD_CTX_CANCELLED;
	return data;
}

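/*
 * Queue 0 is the admin queue; I/O queues are indexed by CPU number
 * plus one.  get_cpu() pins the caller to its CPU until put_nvmeq()
 * drops the reference with put_cpu().
 */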
static struct nvme_queue *get_nvmeq(struct nvme_ns *ns)
{
	return ns->dev->queues[get_cpu() + 1];
}

static void put_nvmeq(struct nvme_queue *nvmeq)
{
	put_cpu();
}

/**
 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	unsigned long flags;
	u16 tail;
	spin_lock_irqsave(&nvmeq->q_lock, flags);
	tail = nvmeq->sq_tail;
	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
	spin_unlock_irqrestore(&nvmeq->q_lock, flags);

	return 0;
}

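/*
 * NVMe describes a data buffer with PRP (Physical Region Page)
 * entries: prp1 points into the first page and prp2 is either the
 * second page or the address of a PRP list.  Lists that span more
 * than one page are chained through their last entry.
 */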
struct nvme_prps {
	int npages;
	dma_addr_t first_dma;
	__le64 *list[0];
};

static void nvme_free_prps(struct nvme_dev *dev, struct nvme_prps *prps)
{
	const int last_prp = PAGE_SIZE / 8 - 1;
	int i;
	dma_addr_t prp_dma;

	if (!prps)
		return;

	prp_dma = prps->first_dma;

	if (prps->npages == 0)
		dma_pool_free(dev->prp_small_pool, prps->list[0], prp_dma);
	for (i = 0; i < prps->npages; i++) {
		__le64 *prp_list = prps->list[i];
		dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
		dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
		prp_dma = next_prp_dma;
	}
	kfree(prps);
}

struct nvme_bio {
	struct bio *bio;
	int nents;
	struct nvme_prps *prps;
	struct scatterlist sg[0];
};

/* XXX: use a mempool */
static struct nvme_bio *alloc_nbio(unsigned nseg, gfp_t gfp)
{
	return kzalloc(sizeof(struct nvme_bio) +
			sizeof(struct scatterlist) * nseg, gfp);
}

static void free_nbio(struct nvme_queue *nvmeq, struct nvme_bio *nbio)
{
	nvme_free_prps(nvmeq->dev, nbio->prps);
	kfree(nbio);
}

static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
{
	struct nvme_bio *nbio = ctx;
	struct bio *bio = nbio->bio;
	u16 status = le16_to_cpup(&cqe->status) >> 1;

	dma_unmap_sg(nvmeq->q_dmadev, nbio->sg, nbio->nents,
			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	free_nbio(nvmeq, nbio);
	if (status) {
		bio_endio(bio, -EIO);
	} else if (bio->bi_vcnt > bio->bi_idx) {
		if (bio_list_empty(&nvmeq->sq_cong))
			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
		bio_list_add(&nvmeq->sq_cong, bio);
		wake_up_process(nvme_thread);
	} else {
		bio_endio(bio, 0);
	}
}

/* length is in bytes.  The gfp flags indicate whether we may sleep. */
static struct nvme_prps *nvme_setup_prps(struct nvme_dev *dev,
					struct nvme_common_command *cmd,
					struct scatterlist *sg, int *len,
					gfp_t gfp)
{
	struct dma_pool *pool;
	int length = *len;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = offset_in_page(dma_addr);
	__le64 *prp_list;
	dma_addr_t prp_dma;
	int nprps, npages, i, prp_page;
	struct nvme_prps *prps = NULL;

	cmd->prp1 = cpu_to_le64(dma_addr);
	length -= (PAGE_SIZE - offset);
	if (length <= 0)
		return prps;

	dma_len -= (PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (PAGE_SIZE - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= PAGE_SIZE) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		return prps;
	}

	nprps = DIV_ROUND_UP(length, PAGE_SIZE);
	npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE);
	prps = kmalloc(sizeof(*prps) + sizeof(__le64 *) * npages, gfp);
	if (!prps) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		*len = (*len - length) + PAGE_SIZE;
		return prps;
	}
	prp_page = 0;
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		prps->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		prps->npages = npages;
	}

	prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
	if (!prp_list) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		*len = (*len - length) + PAGE_SIZE;
		kfree(prps);
		return NULL;
	}
	prps->list[prp_page++] = prp_list;
	prps->first_dma = prp_dma;
	cmd->prp2 = cpu_to_le64(prp_dma);
	i = 0;
	for (;;) {
		if (i == PAGE_SIZE / 8) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
			if (!prp_list) {
				*len = (*len - length);
				return prps;
			}
			prps->list[prp_page++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= PAGE_SIZE;
		dma_addr += PAGE_SIZE;
		length -= PAGE_SIZE;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		BUG_ON(dma_len < 0);
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	return prps;
}

/* NVMe scatterlists require no holes in the virtual address */
#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2)	((vec2)->bv_offset || \
			(((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))

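/*
 * Build a scatterlist from the bio's segments, merging bvecs that are
 * physically contiguous.  If a segment would break the virtual-address
 * contiguity the controller requires, stop there and leave bi_idx
 * pointing at the remainder so the bio can be resubmitted later.
 */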
static int nvme_map_bio(struct device *dev, struct nvme_bio *nbio,
		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
	struct bio_vec *bvec, *bvprv = NULL;
	struct scatterlist *sg = NULL;
	int i, old_idx, length = 0, nsegs = 0;

	sg_init_table(nbio->sg, psegs);
	old_idx = bio->bi_idx;
	bio_for_each_segment(bvec, bio, i) {
		if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
			sg->length += bvec->bv_len;
		} else {
			if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
				break;
			sg = sg ? sg + 1 : nbio->sg;
			sg_set_page(sg, bvec->bv_page, bvec->bv_len,
							bvec->bv_offset);
			nsegs++;
		}
		length += bvec->bv_len;
		bvprv = bvec;
	}
	bio->bi_idx = i;
	nbio->nents = nsegs;
	sg_mark_end(sg);
	if (dma_map_sg(dev, nbio->sg, nbio->nents, dma_dir) == 0) {
		bio->bi_idx = old_idx;
		return -ENOMEM;
	}
	return length;
}

static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								int cmdid)
{
	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.command_id = cmdid;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;
}

static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
{
	int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
						sync_completion_id, IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		return cmdid;

	return nvme_submit_flush(nvmeq, ns, cmdid);
}

/*
 * Called with local interrupts disabled and the q_lock held.  May not sleep.
 */
static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								struct bio *bio)
{
	struct nvme_command *cmnd;
	struct nvme_bio *nbio;
	enum dma_data_direction dma_dir;
	int cmdid, length, result = -ENOMEM;
	u16 control;
	u32 dsmgmt;
	int psegs = bio_phys_segments(ns->queue, bio);

	if ((bio->bi_rw & REQ_FLUSH) && psegs) {
		result = nvme_submit_flush_data(nvmeq, ns);
		if (result)
			return result;
	}

	nbio = alloc_nbio(psegs, GFP_ATOMIC);
	if (!nbio)
		goto nomem;
	nbio->bio = bio;

	result = -EBUSY;
	cmdid = alloc_cmdid(nvmeq, nbio, bio_completion_id, IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		goto free_nbio;

	if ((bio->bi_rw & REQ_FLUSH) && !psegs)
		return nvme_submit_flush(nvmeq, ns, cmdid);

	control = 0;
	if (bio->bi_rw & REQ_FUA)
		control |= NVME_RW_FUA;
	if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	dsmgmt = 0;
	if (bio->bi_rw & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	if (bio_data_dir(bio)) {
		cmnd->rw.opcode = nvme_cmd_write;
		dma_dir = DMA_TO_DEVICE;
	} else {
		cmnd->rw.opcode = nvme_cmd_read;
		dma_dir = DMA_FROM_DEVICE;
	}

	result = nvme_map_bio(nvmeq->q_dmadev, nbio, bio, dma_dir, psegs);
	if (result < 0)
		goto free_nbio;
	length = result;

	cmnd->rw.command_id = cmdid;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	nbio->prps = nvme_setup_prps(nvmeq->dev, &cmnd->common, nbio->sg,
							&length, GFP_ATOMIC);
	cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
	cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);

	bio->bi_sector += length >> 9;

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;

 free_nbio:
	free_nbio(nvmeq, nbio);
 nomem:
	return result;
}

/*
 * NB: return value of non-zero would mean that we were a stacking driver.
 * make_request must always succeed.
 */
static int nvme_make_request(struct request_queue *q, struct bio *bio)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_queue *nvmeq = get_nvmeq(ns);
	int result = -EBUSY;

	spin_lock_irq(&nvmeq->q_lock);
	if (bio_list_empty(&nvmeq->sq_cong))
		result = nvme_submit_bio_queue(nvmeq, ns, bio);
	if (unlikely(result)) {
		if (bio_list_empty(&nvmeq->sq_cong))
			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
		bio_list_add(&nvmeq->sq_cong, bio);
	}

	spin_unlock_irq(&nvmeq->q_lock);
	put_nvmeq(nvmeq);

	return 0;
}

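/*
 * Synchronous commands: the submitting task sleeps in TASK_KILLABLE
 * and sync_completion() copies the status and result out of the CQE
 * before waking it.
 */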
struct sync_cmd_info {
	struct task_struct *task;
	u32 result;
	int status;
};

static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
{
	struct sync_cmd_info *cmdinfo = ctx;
	if (unlikely((unsigned long)cmdinfo == CMD_CTX_CANCELLED))
		return;
	if ((unsigned long)cmdinfo == CMD_CTX_FLUSH)
		return;
	if (unlikely((unsigned long)cmdinfo == CMD_CTX_COMPLETED)) {
		dev_warn(nvmeq->q_dmadev,
				"completed id %d twice on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}
	if (unlikely((unsigned long)cmdinfo == CMD_CTX_INVALID)) {
		dev_warn(nvmeq->q_dmadev,
				"invalid id %d completed on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}
	cmdinfo->result = le32_to_cpup(&cqe->result);
	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
	wake_up_process(cmdinfo->task);
}

typedef void (*completion_fn)(struct nvme_queue *, void *,
						struct nvme_completion *);

static const completion_fn nvme_completions[4] = {
	[sync_completion_id] = sync_completion,
	[bio_completion_id]  = bio_completion,
};

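/*
 * Completion entries are consumed by comparing the phase tag in the
 * status field with cq_phase; the controller inverts the tag on each
 * pass through the queue, so a mismatch means the entry is stale.
 */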
static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
{
	u16 head, phase;

	head = nvmeq->cq_head;
	phase = nvmeq->cq_phase;

	for (;;) {
		unsigned long data;
		void *ptr;
		unsigned char handler;
		struct nvme_completion cqe = nvmeq->cqes[head];
		if ((le16_to_cpu(cqe.status) & 1) != phase)
			break;
		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}

		data = free_cmdid(nvmeq, cqe.command_id);
		handler = data & 3;
		ptr = (void *)(data & ~3UL);
		nvme_completions[handler](nvmeq, ptr, &cqe);
	}

	/* If the controller ignores the cq head doorbell and continuously
	 * writes to the queue, it is theoretically possible to wrap around
	 * the queue twice and mistakenly return IRQ_NONE.  Linux only
	 * requires that 0.1% of your interrupts are handled, so this isn't
	 * a big problem.
	 */
	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
		return IRQ_NONE;

	writel(head, nvmeq->q_db + 1);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return IRQ_HANDLED;
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	irqreturn_t result;
	struct nvme_queue *nvmeq = data;
	spin_lock(&nvmeq->q_lock);
	result = nvme_process_cq(nvmeq);
	spin_unlock(&nvmeq->q_lock);
	return result;
}

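/*
 * For threaded interrupts: peek at the next completion entry's phase
 * tag to decide whether this device has work before waking the thread.
 */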
static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
	if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
		return IRQ_NONE;
	return IRQ_WAKE_THREAD;
}

static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
{
	spin_lock_irq(&nvmeq->q_lock);
	cancel_cmdid(nvmeq, cmdid);
	spin_unlock_irq(&nvmeq->q_lock);
}

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
			struct nvme_command *cmd, u32 *result, unsigned timeout)
{
	int cmdid;
	struct sync_cmd_info cmdinfo;

	cmdinfo.task = current;
	cmdinfo.status = -EINTR;

	cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion_id,
								timeout);
	if (cmdid < 0)
		return cmdid;
	cmd->common.command_id = cmdid;

	set_current_state(TASK_KILLABLE);
	nvme_submit_cmd(nvmeq, cmd);
	schedule();

	if (cmdinfo.status == -EINTR) {
		nvme_abort_command(nvmeq, cmdid);
		return -EINTR;
	}

	if (result)
		*result = cmdinfo.result;

	return cmdinfo.status;
}

static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
								u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	int status;
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
							dma_addr_t dma_addr)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.prp1 = cpu_to_le64(dma_addr);
	c.identify.cns = cpu_to_le32(cns);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
			unsigned dword11, dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	return nvme_submit_admin_cmd(dev, &c, result);
}

static void nvme_free_queue(struct nvme_dev *dev, int qid)
{
	struct nvme_queue *nvmeq = dev->queues[qid];
	int vector = dev->entry[nvmeq->cq_vector].vector;

	irq_set_affinity_hint(vector, NULL);
	free_irq(vector, nvmeq);

	/* Don't tell the adapter to delete the admin queue */
	if (qid) {
		adapter_delete_sq(dev, qid);
		adapter_delete_cq(dev, qid);
	}

	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
}

static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
							int depth, int vector)
{
	struct device *dmadev = &dev->pci_dev->dev;
	unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
	if (!nvmeq)
		return NULL;

	nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
					&nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));

	nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
					&nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		goto free_cqdma;

	nvmeq->q_dmadev = dmadev;
	nvmeq->dev = dev;
	spin_lock_init(&nvmeq->q_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	init_waitqueue_head(&nvmeq->sq_full);
	init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
	bio_list_init(&nvmeq->sq_cong);
	nvmeq->q_db = &dev->dbs[qid * 2];
	nvmeq->q_depth = depth;
	nvmeq->cq_vector = vector;

	return nvmeq;

 free_cqdma:
	dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
							nvmeq->cq_dma_addr);
 free_nvmeq:
	kfree(nvmeq);
	return NULL;
}

static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
							const char *name)
{
	if (use_threaded_interrupts)
		return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
					nvme_irq_check, nvme_irq,
					IRQF_DISABLED | IRQF_SHARED,
					name, nvmeq);
	return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
				IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
}

static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
					int qid, int cq_size, int vector)
{
	int result;
	struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);

	if (!nvmeq)
		return ERR_PTR(-ENOMEM);

	result = adapter_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto free_nvmeq;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = queue_request_irq(dev, nvmeq, "nvme");
	if (result < 0)
		goto release_sq;

	return nvmeq;

 release_sq:
	adapter_delete_sq(dev, qid);
 release_cq:
	adapter_delete_cq(dev, qid);
 free_nvmeq:
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
	return ERR_PTR(result);
}

static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	u64 cap;
	unsigned long timeout;
	struct nvme_queue *nvmeq;

	dev->dbs = ((void __iomem *)dev->bar) + 4096;

	nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
	if (!nvmeq)
		return -ENOMEM;

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
	dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;

	writel(0, &dev->bar->cc);
	writel(aqa, &dev->bar->aqa);
	writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
	writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
	writel(dev->ctrl_config, &dev->bar->cc);

	cap = readq(&dev->bar->cap);
	timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;

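	/* CAP.TO is in units of 500ms: wait that long for CSTS.RDY. */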
	while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(&dev->pci_dev->dev,
				"Device not ready; aborting initialisation\n");
			return -ENODEV;
		}
	}

	result = queue_request_irq(dev, nvmeq, "nvme admin");
	dev->queues[0] = nvmeq;
	return result;
}

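/*
 * Pin a user buffer with get_user_pages_fast() and describe it with a
 * scatterlist suitable for dma_map_sg().
 */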
static int nvme_map_user_pages(struct nvme_dev *dev, int write,
				unsigned long addr, unsigned length,
				struct scatterlist **sgp)
{
	int i, err, count, nents, offset;
	struct scatterlist *sg;
	struct page **pages;

	if (addr & 3)
		return -EINVAL;
	if (!length)
		return -EINVAL;

	offset = offset_in_page(addr);
	count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
	pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	err = get_user_pages_fast(addr, count, 1, pages);
	if (err < count) {
		count = err;
		err = -EFAULT;
		goto put_pages;
	}

	sg = kcalloc(count, sizeof(*sg), GFP_KERNEL);
	if (!sg) {
		err = -ENOMEM;
		goto put_pages;
	}
	sg_init_table(sg, count);
	for (i = 0; i < count; i++) {
		sg_set_page(&sg[i], pages[i],
				min_t(int, length, PAGE_SIZE - offset), offset);
		length -= (PAGE_SIZE - offset);
		offset = 0;
	}

	err = -ENOMEM;
	nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!nents)
		goto put_pages;

	kfree(pages);
	*sgp = sg;
	return nents;

 put_pages:
	for (i = 0; i < count; i++)
		put_page(pages[i]);
	kfree(pages);
	return err;
}

static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
			unsigned long addr, int length, struct scatterlist *sg)
{
	int i, count;

	count = DIV_ROUND_UP(offset_in_page(addr) + length, PAGE_SIZE);
	dma_unmap_sg(&dev->pci_dev->dev, sg, count,
			write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

	for (i = 0; i < count; i++)
		put_page(sg_page(&sg[i]));
}

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_queue *nvmeq;
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length;
	int nents, status;
	struct scatterlist *sg;
	struct nvme_prps *prps;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	length = (io.nblocks + 1) << ns->lba_shift;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		nents = nvme_map_user_pages(dev, io.opcode & 1, io.addr,
								length, &sg);
		break;
	default:
		return -EINVAL;
	}

	if (nents < 0)
		return nents;

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le16(io.dsmgmt);
	c.rw.reftag = io.reftag;
	c.rw.apptag = io.apptag;
	c.rw.appmask = io.appmask;
	/* XXX: metadata */
	prps = nvme_setup_prps(dev, &c.common, sg, &length, GFP_KERNEL);

	nvmeq = get_nvmeq(ns);
	/*
	 * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
	 * disabled.  We may be preempted at any point, and be rescheduled
	 * to a different CPU.  That will cause cacheline bouncing, but no
	 * additional races since q_lock already protects against other CPUs.
	 */
	put_nvmeq(nvmeq);
	if (length != (io.nblocks + 1) << ns->lba_shift)
		status = -ENOMEM;
	else
		status = nvme_submit_sync_cmd(nvmeq, &c, NULL, IO_TIMEOUT);

	nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, sg);
	nvme_free_prps(dev, prps);
	return status;
}

static int nvme_user_admin_cmd(struct nvme_ns *ns,
					struct nvme_admin_cmd __user *ucmd)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_admin_cmd cmd;
	struct nvme_command c;
	int status, length, nents = 0;
	struct scatterlist *sg;
	struct nvme_prps *prps = NULL;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	length = cmd.data_len;
	if (cmd.data_len) {
		nents = nvme_map_user_pages(dev, 1, cmd.addr, length, &sg);
		if (nents < 0)
			return nents;
		prps = nvme_setup_prps(dev, &c.common, sg, &length, GFP_KERNEL);
	}

	if (length != cmd.data_len)
		status = -ENOMEM;
	else
		status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (cmd.data_len) {
		nvme_unmap_user_pages(dev, 0, cmd.addr, cmd.data_len, sg);
		nvme_free_prps(dev, prps);
	}
	return status;
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
							unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_ID:
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_admin_cmd(ns, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_ioctl,
};

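/*
 * Scan a queue's outstanding commands and cancel any that have waited
 * longer than their timeout, completing them with an abort status so
 * the submitter sees an error instead of hanging.
 */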
static void nvme_timeout_ios(struct nvme_queue *nvmeq)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	unsigned long now = jiffies;
	int cmdid;

	for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
		unsigned long data;
		void *ptr;
		unsigned char handler;
		static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, };

		if (!time_after(now, info[cmdid].timeout))
			continue;
		dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
		data = cancel_cmdid(nvmeq, cmdid);
		handler = data & 3;
		ptr = (void *)(data & ~3UL);
		nvme_completions[handler](nvmeq, ptr, &cqe);
	}
}

static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
{
	while (bio_list_peek(&nvmeq->sq_cong)) {
		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
		struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
		if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
			bio_list_add_head(&nvmeq->sq_cong, bio);
			break;
		}
		if (bio_list_empty(&nvmeq->sq_cong))
			remove_wait_queue(&nvmeq->sq_full,
							&nvmeq->sq_cong_wait);
	}
}

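/*
 * One kernel thread polls every queue of every registered device once
 * a second: it reaps completions the IRQ handler may have missed,
 * times out stale commands and resubmits bios that were parked while
 * the submission queue was congested.
 */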
static int nvme_kthread(void *data)
{
	struct nvme_dev *dev;

	while (!kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock(&dev_list_lock);
		list_for_each_entry(dev, &dev_list, node) {
			int i;
			for (i = 0; i < dev->queue_count; i++) {
				struct nvme_queue *nvmeq = dev->queues[i];
				if (!nvmeq)
					continue;
				spin_lock_irq(&nvmeq->q_lock);
				if (nvme_process_cq(nvmeq))
					printk("process_cq did something\n");
				nvme_timeout_ios(nvmeq);
				nvme_resubmit_bios(nvmeq);
				spin_unlock_irq(&nvmeq->q_lock);
			}
		}
		spin_unlock(&dev_list_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	return 0;
}

static DEFINE_IDA(nvme_index_ida);

static int nvme_get_ns_idx(void)
{
	int index, error;

	do {
		if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
			return -1;

		spin_lock(&dev_list_lock);
		error = ida_get_new(&nvme_index_ida, &index);
		spin_unlock(&dev_list_lock);
	} while (error == -EAGAIN);

	if (error)
		index = -1;
	return index;
}

static void nvme_put_ns_idx(int index)
{
	spin_lock(&dev_list_lock);
	ida_remove(&nvme_index_ida, index);
	spin_unlock(&dev_list_lock);
}

static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
			struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	int lbaf;

	if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
		return NULL;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;
	ns->queue = blk_alloc_queue(GFP_KERNEL);
	if (!ns->queue)
		goto out_free_ns;
	ns->queue->queue_flags = QUEUE_FLAG_DEFAULT | QUEUE_FLAG_NOMERGES |
				QUEUE_FLAG_NONROT | QUEUE_FLAG_DISCARD;
	blk_queue_make_request(ns->queue, nvme_make_request);
	ns->dev = dev;
	ns->queue->queuedata = ns;

	disk = alloc_disk(NVME_MINORS);
	if (!disk)
		goto out_free_queue;
	ns->ns_id = nsid;
	ns->disk = disk;
	lbaf = id->flbas & 0xf;
	ns->lba_shift = id->lbaf[lbaf].ds;

	disk->major = nvme_major;
	disk->minors = NVME_MINORS;
	disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->driverfs_dev = &dev->pci_dev->dev;
	sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	return ns;

 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_free_ns:
	kfree(ns);
	return NULL;
}

static void nvme_ns_free(struct nvme_ns *ns)
{
	int index = ns->disk->first_minor / NVME_MINORS;
	put_disk(ns->disk);
	nvme_put_ns_idx(index);
	blk_cleanup_queue(ns->queue);
	kfree(ns);
}

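/*
 * The Number of Queues feature encodes the requested submission and
 * completion queue counts, zero based, in the low and high halves of
 * dword 11; the controller returns how many it actually allocated.
 */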
static int set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	u32 q_count = (count - 1) | ((count - 1) << 16);

	status = nvme_get_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
								&result);
	if (status)
		return -EIO;
	return min(result & 0xffff, result >> 16) + 1;
}

static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
{
	int result, cpu, i, nr_io_queues;

	nr_io_queues = num_online_cpus();
	result = set_queue_count(dev, nr_io_queues);
	if (result < 0)
		return result;
	if (result < nr_io_queues)
		nr_io_queues = result;

	/* Deregister the admin queue's interrupt */
	free_irq(dev->entry[0].vector, dev->queues[0]);

	for (i = 0; i < nr_io_queues; i++)
		dev->entry[i].entry = i;
	for (;;) {
		result = pci_enable_msix(dev->pci_dev, dev->entry,
								nr_io_queues);
		if (result == 0) {
			break;
		} else if (result > 0) {
			nr_io_queues = result;
			continue;
		} else {
			nr_io_queues = 1;
			break;
		}
	}

	result = queue_request_irq(dev, dev->queues[0], "nvme admin");
	/* XXX: handle failure here */

	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < nr_io_queues; i++) {
		irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
	}

	for (i = 0; i < nr_io_queues; i++) {
		dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
							NVME_Q_DEPTH, i);
		if (IS_ERR(dev->queues[i + 1]))
			return PTR_ERR(dev->queues[i + 1]);
		dev->queue_count++;
	}

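	/*
	 * CPUs beyond the number of I/O queues we managed to create
	 * share the existing queues round-robin.
	 */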
	for (; i < num_possible_cpus(); i++) {
		int target = i % rounddown_pow_of_two(dev->queue_count - 1);
		dev->queues[i + 1] = dev->queues[target + 1];
	}

	return 0;
}

static void nvme_free_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->queue_count - 1; i >= 0; i--)
		nvme_free_queue(dev, i);
}

static int __devinit nvme_dev_add(struct nvme_dev *dev)
{
	int res, nn, i;
	struct nvme_ns *ns, *next;
	struct nvme_id_ctrl *ctrl;
	struct nvme_id_ns *id_ns;
	void *mem;
	dma_addr_t dma_addr;

	res = nvme_setup_io_queues(dev);
	if (res)
		return res;

	mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
								GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	res = nvme_identify(dev, 0, 1, dma_addr);
	if (res) {
		res = -EIO;
		goto out_free;
	}

	ctrl = mem;
	nn = le32_to_cpup(&ctrl->nn);
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));

	id_ns = mem;
	for (i = 0; i <= nn; i++) {
		res = nvme_identify(dev, i, 0, dma_addr);
		if (res)
			continue;

		if (id_ns->ncap == 0)
			continue;

		res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
							dma_addr + 4096, NULL);
		if (res)
			continue;

		ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
		if (ns)
			list_add_tail(&ns->list, &dev->namespaces);
	}
	list_for_each_entry(ns, &dev->namespaces, list)
		add_disk(ns->disk);

	goto out;

 out_free:
	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		nvme_ns_free(ns);
	}

 out:
	dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
	return res;
}

static int nvme_dev_remove(struct nvme_dev *dev)
{
	struct nvme_ns *ns, *next;

	spin_lock(&dev_list_lock);
	list_del(&dev->node);
	spin_unlock(&dev_list_lock);

	/* TODO: wait for all outstanding I/O to finish, or cancel it */

	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		del_gendisk(ns->disk);
		nvme_ns_free(ns);
	}

	nvme_free_queues(dev);

	return 0;
}

static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
	struct device *dmadev = &dev->pci_dev->dev;
	dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
						PAGE_SIZE, PAGE_SIZE, 0);
	if (!dev->prp_page_pool)
		return -ENOMEM;

	/* Optimisation for I/Os between 4k and 128k */
	dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
						256, 256, 0);
	if (!dev->prp_small_pool) {
		dma_pool_destroy(dev->prp_page_pool);
		return -ENOMEM;
	}
	return 0;
}

static void nvme_release_prp_pools(struct nvme_dev *dev)
{
	dma_pool_destroy(dev->prp_page_pool);
	dma_pool_destroy(dev->prp_small_pool);
}

/* XXX: Use an ida or something to let remove / add work correctly */
static void nvme_set_instance(struct nvme_dev *dev)
{
	static int instance;
	dev->instance = instance++;
}

static void nvme_release_instance(struct nvme_dev *dev)
{
}

static int __devinit nvme_probe(struct pci_dev *pdev,
						const struct pci_device_id *id)
{
	int bars, result = -ENOMEM;
	struct nvme_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
								GFP_KERNEL);
	if (!dev->entry)
		goto free;
	dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
								GFP_KERNEL);
	if (!dev->queues)
		goto free;

	if (pci_enable_device_mem(pdev))
		goto free;
	pci_set_master(pdev);
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (pci_request_selected_regions(pdev, bars, "nvme"))
		goto disable;

	INIT_LIST_HEAD(&dev->namespaces);
	dev->pci_dev = pdev;
	pci_set_drvdata(pdev, dev);
	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	nvme_set_instance(dev);
	dev->entry[0].vector = pdev->irq;

	result = nvme_setup_prp_pools(dev);
	if (result)
		goto disable_msix;

	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
	if (!dev->bar) {
		result = -ENOMEM;
		goto disable_msix;
	}

	result = nvme_configure_admin_queue(dev);
	if (result)
		goto unmap;
	dev->queue_count++;

	spin_lock(&dev_list_lock);
	list_add(&dev->node, &dev_list);
	spin_unlock(&dev_list_lock);

	result = nvme_dev_add(dev);
	if (result)
		goto delete;

	return 0;

 delete:
	spin_lock(&dev_list_lock);
	list_del(&dev->node);
	spin_unlock(&dev_list_lock);

	nvme_free_queues(dev);
 unmap:
	iounmap(dev->bar);
 disable_msix:
	pci_disable_msix(pdev);
	nvme_release_instance(dev);
	nvme_release_prp_pools(dev);
 disable:
	pci_disable_device(pdev);
	pci_release_regions(pdev);
 free:
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
	return result;
}

static void __devexit nvme_remove(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	nvme_dev_remove(dev);
	pci_disable_msix(pdev);
	iounmap(dev->bar);
	nvme_release_instance(dev);
	nvme_release_prp_pools(dev);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
}

/* These functions are yet to be implemented */
#define nvme_error_detected NULL
#define nvme_dump_registers NULL
#define nvme_link_reset NULL
#define nvme_slot_reset NULL
#define nvme_error_resume NULL
#define nvme_suspend NULL
#define nvme_resume NULL

static struct pci_error_handlers nvme_err_handler = {
	.error_detected	= nvme_error_detected,
	.mmio_enabled	= nvme_dump_registers,
	.link_reset	= nvme_link_reset,
	.slot_reset	= nvme_slot_reset,
	.resume		= nvme_error_resume,
};

/* Move to pci_ids.h later */
#define PCI_CLASS_STORAGE_EXPRESS	0x010802

static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);

static struct pci_driver nvme_driver = {
	.name		= "nvme",
	.id_table	= nvme_id_table,
	.probe		= nvme_probe,
	.remove		= __devexit_p(nvme_remove),
	.suspend	= nvme_suspend,
	.resume		= nvme_resume,
	.err_handler	= &nvme_err_handler,
};

static int __init nvme_init(void)
{
	int result = -EBUSY;

	nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
	if (IS_ERR(nvme_thread))
		return PTR_ERR(nvme_thread);

	nvme_major = register_blkdev(nvme_major, "nvme");
	if (nvme_major <= 0)
		goto kill_kthread;

	result = pci_register_driver(&nvme_driver);
	if (result)
		goto unregister_blkdev;
	return 0;
M
1687
 unregister_blkdev:
M
1689 1690
 kill_kthread:
	kthread_stop(nvme_thread);
M
}

static void __exit nvme_exit(void)
{
	pci_unregister_driver(&nvme_driver);
	unregister_blkdev(nvme_major, "nvme");
	kthread_stop(nvme_thread);
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.6");
module_init(nvme_init);
module_exit(nvme_exit);