/*
 * NVM Express device driver
 * Copyright (c) 2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/nvme.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/version.h>

#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
#define NVME_MINORS 64
#define IO_TIMEOUT	(5 * HZ)
#define ADMIN_TIMEOUT	(60 * HZ)

static int nvme_major;
module_param(nvme_major, int, 0);

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct nvme_queue **queues;
	u32 __iomem *dbs;
	struct pci_dev *pci_dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	int instance;
	int queue_count;
	u32 ctrl_config;
	struct msix_entry *entry;
	struct nvme_bar __iomem *bar;
	struct list_head namespaces;
	char serial[20];
	char model[40];
	char firmware_rev[8];
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_dev *dev;
	struct request_queue *queue;
	struct gendisk *disk;

	int ns_id;
	int lba_shift;
};

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct device *q_dmadev;
	struct nvme_dev *dev;
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	volatile struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	wait_queue_head_t sq_full;
	struct bio_list sq_cong;
	u32 __iomem *q_db;
	u16 q_depth;
	u16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 cq_phase;
	unsigned long cmdid_data[];
};

static void nvme_resubmit_bio(struct nvme_queue *nvmeq, struct bio *bio);

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
}

struct nvme_cmd_info {
	unsigned long ctx;
	unsigned long timeout;
};

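/*
 * The per-queue command bookkeeping lives in the flexible array at the end
 * of struct nvme_queue: a bitmap of q_depth bits used to allocate command
 * IDs, followed immediately by one struct nvme_cmd_info per queue entry.
 * nvme_cmd_info() skips past the bitmap to the start of the info array.
 */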
static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
{
	return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
}

/**
 * alloc_cmdid - Allocate a Command ID
 * @nvmeq: The queue that will be used for this command
 * @ctx: A pointer that will be passed to the handler
 * @handler: The ID of the handler to call
 * @timeout: How long, in jiffies, before the command should be treated as
 *	     timed out
 *
 * Allocate a Command ID for a queue.  The data passed in will
 * be passed to the completion handler.  This is implemented by using
 * the bottom two bits of the ctx pointer to store the handler ID.
 * Passing in a pointer that's not 4-byte aligned will cause a BUG.
 * We can change this if it becomes a problem.
 */
static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx, int handler,
							unsigned timeout)
{
	int depth = nvmeq->q_depth;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	int cmdid;

	BUG_ON((unsigned long)ctx & 3);

	do {
		cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
		if (cmdid >= depth)
			return -EBUSY;
	} while (test_and_set_bit(cmdid, nvmeq->cmdid_data));

	info[cmdid].ctx = (unsigned long)ctx | handler;
	info[cmdid].timeout = jiffies + timeout;
	return cmdid;
}

static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
						int handler, unsigned timeout)
{
	int cmdid;
	wait_event_killable(nvmeq->sq_full,
		(cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
	return (cmdid < 0) ? -EINTR : cmdid;
}

/* If you need more than four handlers, you'll need to change how
 * alloc_cmdid and nvme_process_cq work.  Consider using a special
 * CMD_CTX value instead, if that works for your situation.
 */
enum {
	sync_completion_id = 0,
	bio_completion_id,
};
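/*
 * For example, a bio submission stores
 *	info[cmdid].ctx = (unsigned long)nbio | bio_completion_id;
 * and nvme_process_cq() recovers both halves with
 *	handler = data & 3;  ptr = (void *)(data & ~3UL);
 */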

#define CMD_CTX_BASE		(POISON_POINTER_DELTA + sync_completion_id)
#define CMD_CTX_CANCELLED	(0x30C + CMD_CTX_BASE)
#define CMD_CTX_COMPLETED	(0x310 + CMD_CTX_BASE)
#define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)

static unsigned long free_cmdid(struct nvme_queue *nvmeq, int cmdid)
{
	unsigned long data;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);

	if (cmdid >= nvmeq->q_depth)
		return CMD_CTX_INVALID;
	data = info[cmdid].ctx;
	info[cmdid].ctx = CMD_CTX_COMPLETED;
	clear_bit(cmdid, nvmeq->cmdid_data);
	wake_up(&nvmeq->sq_full);
	return data;
}

static void cancel_cmdid_data(struct nvme_queue *nvmeq, int cmdid)
{
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	info[cmdid].ctx = CMD_CTX_CANCELLED;
}

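/*
 * Queue 0 is the admin queue; I/O queues start at index 1.  Each CPU gets
 * its own I/O queue when enough queues were created, otherwise CPUs are
 * folded onto the available queues.  get_cpu() disables preemption, so the
 * caller must release the queue with put_nvmeq() when it is finished.
 */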
static struct nvme_queue *get_nvmeq(struct nvme_ns *ns)
{
	int qid, cpu = get_cpu();
	if (cpu < ns->dev->queue_count)
		qid = cpu + 1;
	else
		qid = (cpu % rounddown_pow_of_two(ns->dev->queue_count)) + 1;
	return ns->dev->queues[qid];
}

static void put_nvmeq(struct nvme_queue *nvmeq)
{
	put_cpu();
}

/**
 * nvme_submit_cmd - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	unsigned long flags;
	u16 tail;
	/* XXX: Need to check tail isn't going to overrun head */
	spin_lock_irqsave(&nvmeq->q_lock, flags);
	tail = nvmeq->sq_tail;
	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	if (++tail == nvmeq->q_depth)
		tail = 0;
	/* The SQ tail doorbell must be written with the new tail value (one
	 * past the entry just written), so advance the tail first. */
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
	spin_unlock_irqrestore(&nvmeq->q_lock, flags);

	return 0;
}

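/*
 * NVMe describes data buffers with Physical Region Page (PRP) entries:
 * prp1 points at the first page of data and prp2 either points at the
 * second page or, for longer transfers, at a page full of 8-byte PRP
 * entries.  When several list pages are needed, the last entry of each
 * page points to the next one (see nvme_setup_prps() below).  This
 * structure remembers those list pages so they can be freed later.
 */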
struct nvme_prps {
	int npages;
	dma_addr_t first_dma;
	__le64 *list[0];
};

static void nvme_free_prps(struct nvme_dev *dev, struct nvme_prps *prps)
{
	const int last_prp = PAGE_SIZE / 8 - 1;
	int i;
	dma_addr_t prp_dma;

	if (!prps)
		return;

	prp_dma = prps->first_dma;

	if (prps->npages == 0)
		dma_pool_free(dev->prp_small_pool, prps->list[0], prp_dma);
	for (i = 0; i < prps->npages; i++) {
		__le64 *prp_list = prps->list[i];
		dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
		dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
		prp_dma = next_prp_dma;
	}
	kfree(prps);
}

struct nvme_bio {
	struct bio *bio;
	int nents;
	struct nvme_prps *prps;
	struct scatterlist sg[0];
};

/* XXX: use a mempool */
static struct nvme_bio *alloc_nbio(unsigned nseg, gfp_t gfp)
{
	return kzalloc(sizeof(struct nvme_bio) +
			sizeof(struct scatterlist) * nseg, gfp);
}

static void free_nbio(struct nvme_queue *nvmeq, struct nvme_bio *nbio)
{
	nvme_free_prps(nvmeq->dev, nbio->prps);
	kfree(nbio);
}

static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
{
	struct nvme_bio *nbio = ctx;
	struct bio *bio = nbio->bio;
	u16 status = le16_to_cpup(&cqe->status) >> 1;

	dma_unmap_sg(nvmeq->q_dmadev, nbio->sg, nbio->nents,
			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	free_nbio(nvmeq, nbio);
	bio_endio(bio, status ? -EIO : 0);
	bio = bio_list_pop(&nvmeq->sq_cong);
	if (bio)
		nvme_resubmit_bio(nvmeq, bio);
}

/* length is in bytes */
static struct nvme_prps *nvme_setup_prps(struct nvme_dev *dev,
					struct nvme_common_command *cmd,
					struct scatterlist *sg, int length)
{
	struct dma_pool *pool;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = offset_in_page(dma_addr);
	__le64 *prp_list;
	dma_addr_t prp_dma;
	int nprps, npages, i, prp_page;
	struct nvme_prps *prps = NULL;

	cmd->prp1 = cpu_to_le64(dma_addr);
	length -= (PAGE_SIZE - offset);
	if (length <= 0)
		return prps;

	dma_len -= (PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (PAGE_SIZE - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= PAGE_SIZE) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		return prps;
	}

	nprps = DIV_ROUND_UP(length, PAGE_SIZE);
	npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE);
	prps = kmalloc(sizeof(*prps) + sizeof(__le64 *) * npages, GFP_ATOMIC);
	prp_page = 0;
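	/*
	 * A 256-byte pool entry holds up to 32 PRP entries, which covers
	 * transfers up to 128k with 4k pages; anything larger uses whole
	 * pages of PRP entries from the page pool.
	 */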
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		prps->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		prps->npages = npages;
	}

	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
	prps->list[prp_page++] = prp_list;
	prps->first_dma = prp_dma;
	cmd->prp2 = cpu_to_le64(prp_dma);
	i = 0;
	for (;;) {
		if (i == PAGE_SIZE / 8 - 1) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
			prps->list[prp_page++] = prp_list;
			old_prp_list[i] = cpu_to_le64(prp_dma);
			i = 0;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= PAGE_SIZE;
		dma_addr += PAGE_SIZE;
		length -= PAGE_SIZE;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		BUG_ON(dma_len < 0);
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	return prps;
}

static int nvme_map_bio(struct device *dev, struct nvme_bio *nbio,
		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
	struct bio_vec *bvec, *bvprv = NULL;
	struct scatterlist *sg = NULL;
	int i, nsegs = 0;

	sg_init_table(nbio->sg, psegs);
	bio_for_each_segment(bvec, bio, i) {
		if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
			sg->length += bvec->bv_len;
		} else {
			/* Check bvprv && offset == 0 */
			sg = sg ? sg + 1 : nbio->sg;
			sg_set_page(sg, bvec->bv_page, bvec->bv_len,
							bvec->bv_offset);
			nsegs++;
		}
		bvprv = bvec;
	}
	nbio->nents = nsegs;
	sg_mark_end(sg);
	return dma_map_sg(dev, nbio->sg, nbio->nents, dma_dir);
}

static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								struct bio *bio)
{
	struct nvme_command *cmnd;
	struct nvme_bio *nbio;
	enum dma_data_direction dma_dir;
	int cmdid, result = -ENOMEM;
	u16 control;
	u32 dsmgmt;
	int psegs = bio_phys_segments(ns->queue, bio);

	nbio = alloc_nbio(psegs, GFP_ATOMIC);
	if (!nbio)
		goto nomem;
	nbio->bio = bio;

	result = -EBUSY;
	cmdid = alloc_cmdid(nvmeq, nbio, bio_completion_id, IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		goto free_nbio;

	control = 0;
	if (bio->bi_rw & REQ_FUA)
		control |= NVME_RW_FUA;
	if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	dsmgmt = 0;
	if (bio->bi_rw & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	if (bio_data_dir(bio)) {
		cmnd->rw.opcode = nvme_cmd_write;
		dma_dir = DMA_TO_DEVICE;
	} else {
		cmnd->rw.opcode = nvme_cmd_read;
		dma_dir = DMA_FROM_DEVICE;
	}

	result = -ENOMEM;
	if (nvme_map_bio(nvmeq->q_dmadev, nbio, bio, dma_dir, psegs) == 0)
		goto free_nbio;

	cmnd->rw.flags = 1;
	cmnd->rw.command_id = cmdid;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	nbio->prps = nvme_setup_prps(nvmeq->dev, &cmnd->common, nbio->sg,
								bio->bi_size);
	cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
	cmnd->rw.length = cpu_to_le16((bio->bi_size >> ns->lba_shift) - 1);
	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;

 free_nbio:
	free_nbio(nvmeq, nbio);
 nomem:
	return result;
}

static void nvme_resubmit_bio(struct nvme_queue *nvmeq, struct bio *bio)
{
	struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
	if (nvme_submit_bio_queue(nvmeq, ns, bio))
		bio_list_add_head(&nvmeq->sq_cong, bio);
	else if (bio_list_empty(&nvmeq->sq_cong))
		blk_clear_queue_congested(ns->queue, rw_is_sync(bio->bi_rw));
	/* XXX: Need to duplicate the logic from __freed_request here */
}

/*
 * NB: return value of non-zero would mean that we were a stacking driver.
 * make_request must always succeed.
 */
static int nvme_make_request(struct request_queue *q, struct bio *bio)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_queue *nvmeq = get_nvmeq(ns);
	int result = -EBUSY;

	spin_lock_irq(&nvmeq->q_lock);
	if (bio_list_empty(&nvmeq->sq_cong))
		result = nvme_submit_bio_queue(nvmeq, ns, bio);
	if (unlikely(result)) {
		if (bio_list_empty(&nvmeq->sq_cong))
			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
		bio_list_add(&nvmeq->sq_cong, bio);
	}

	spin_unlock_irq(&nvmeq->q_lock);
	put_nvmeq(nvmeq);

	return 0;
}

struct sync_cmd_info {
	struct task_struct *task;
	u32 result;
	int status;
};

static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
{
	struct sync_cmd_info *cmdinfo = ctx;
	if ((unsigned long)cmdinfo == CMD_CTX_CANCELLED)
		return;
	if (unlikely((unsigned long)cmdinfo == CMD_CTX_COMPLETED)) {
		dev_warn(nvmeq->q_dmadev,
				"completed id %d twice on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}
	if (unlikely((unsigned long)cmdinfo == CMD_CTX_INVALID)) {
		dev_warn(nvmeq->q_dmadev,
				"invalid id %d completed on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}
	cmdinfo->result = le32_to_cpup(&cqe->result);
	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
	wake_up_process(cmdinfo->task);
}

typedef void (*completion_fn)(struct nvme_queue *, void *,
						struct nvme_completion *);

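/*
 * Completion entries carry a phase tag in bit 0 of the status field.  The
 * controller inverts the tag each time it wraps around the queue, so an
 * entry is new only while its tag matches cq_phase; that is how the loop
 * below detects fresh completions without a head register to read.
 */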
static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
{
	u16 head, phase;

	static const completion_fn completions[4] = {
		[sync_completion_id] = sync_completion,
		[bio_completion_id]  = bio_completion,
	};

	head = nvmeq->cq_head;
	phase = nvmeq->cq_phase;

	for (;;) {
		unsigned long data;
		void *ptr;
		unsigned char handler;
		struct nvme_completion cqe = nvmeq->cqes[head];
		if ((le16_to_cpu(cqe.status) & 1) != phase)
			break;
		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}

		data = free_cmdid(nvmeq, cqe.command_id);
		handler = data & 3;
		ptr = (void *)(data & ~3UL);
		completions[handler](nvmeq, ptr, &cqe);
	}

	/* If the controller ignores the cq head doorbell and continuously
	 * writes to the queue, it is theoretically possible to wrap around
	 * the queue twice and mistakenly return IRQ_NONE.  Linux only
	 * requires that 0.1% of your interrupts are handled, so this isn't
	 * a big problem.
	 */
	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
		return IRQ_NONE;

	writel(head, nvmeq->q_db + 1);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return IRQ_HANDLED;
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	irqreturn_t result;
	struct nvme_queue *nvmeq = data;
	spin_lock(&nvmeq->q_lock);
	result = nvme_process_cq(nvmeq);
	spin_unlock(&nvmeq->q_lock);
	return result;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
	if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
		return IRQ_NONE;
	return IRQ_WAKE_THREAD;
}

static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
{
	spin_lock_irq(&nvmeq->q_lock);
	cancel_cmdid_data(nvmeq, cmdid);
	spin_unlock_irq(&nvmeq->q_lock);
}

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
			struct nvme_command *cmd, u32 *result, unsigned timeout)
{
	int cmdid;
	struct sync_cmd_info cmdinfo;

	cmdinfo.task = current;
	cmdinfo.status = -EINTR;

	cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion_id,
								timeout);
	if (cmdid < 0)
		return cmdid;
	cmd->common.command_id = cmdid;

	set_current_state(TASK_KILLABLE);
	nvme_submit_cmd(nvmeq, cmd);
	schedule();

	if (cmdinfo.status == -EINTR) {
		nvme_abort_command(nvmeq, cmdid);
		return -EINTR;
	}

	if (result)
		*result = cmdinfo.result;

	return cmdinfo.status;
}

static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
								u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	int status;
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static void nvme_free_queue(struct nvme_dev *dev, int qid)
{
	struct nvme_queue *nvmeq = dev->queues[qid];

	free_irq(dev->entry[nvmeq->cq_vector].vector, nvmeq);

	/* Don't tell the adapter to delete the admin queue */
	if (qid) {
		adapter_delete_sq(dev, qid);
		adapter_delete_cq(dev, qid);
	}

	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
}

static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
							int depth, int vector)
{
	struct device *dmadev = &dev->pci_dev->dev;
	unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
	if (!nvmeq)
		return NULL;

	nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
					&nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));

	nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
					&nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		goto free_cqdma;

	nvmeq->q_dmadev = dmadev;
	nvmeq->dev = dev;
	spin_lock_init(&nvmeq->q_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	init_waitqueue_head(&nvmeq->sq_full);
	bio_list_init(&nvmeq->sq_cong);
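	/*
	 * Doorbell registers start at offset 4096 of the BAR and come in
	 * pairs per queue: submission queue tail first, completion queue
	 * head second, which is why the CQ doorbell is written via q_db + 1.
	 */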
	nvmeq->q_db = &dev->dbs[qid * 2];
	nvmeq->q_depth = depth;
	nvmeq->cq_vector = vector;

	return nvmeq;

 free_cqdma:
	dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
							nvmeq->cq_dma_addr);
 free_nvmeq:
	kfree(nvmeq);
	return NULL;
}

static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
							const char *name)
{
	if (use_threaded_interrupts)
		return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
					nvme_irq_check, nvme_irq,
					IRQF_DISABLED | IRQF_SHARED,
					name, nvmeq);
	return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
				IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
}

static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
					int qid, int cq_size, int vector)
{
	int result;
	struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);

	if (!nvmeq)
		return NULL;

	result = adapter_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto free_nvmeq;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = queue_request_irq(dev, nvmeq, "nvme");
	if (result < 0)
		goto release_sq;

	return nvmeq;

 release_sq:
	adapter_delete_sq(dev, qid);
 release_cq:
	adapter_delete_cq(dev, qid);
 free_nvmeq:
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
	return NULL;
}

static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	struct nvme_queue *nvmeq;

	dev->dbs = ((void __iomem *)dev->bar) + 4096;

	nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
	if (!nvmeq)
		return -ENOMEM;

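	/*
	 * AQA holds the admin submission and completion queue sizes as
	 * zero-based values in its lower and upper halves.
	 */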
	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
	dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;

	writel(0, &dev->bar->cc);
	writel(aqa, &dev->bar->aqa);
	writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
	writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
	writel(dev->ctrl_config, &dev->bar->cc);

	while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
	}

	result = queue_request_irq(dev, nvmeq, "nvme admin");
	dev->queues[0] = nvmeq;
	return result;
}

static int nvme_map_user_pages(struct nvme_dev *dev, int write,
				unsigned long addr, unsigned length,
				struct scatterlist **sgp)
{
	int i, err, count, nents, offset;
	struct scatterlist *sg;
	struct page **pages;

	if (addr & 3)
		return -EINVAL;
	if (!length)
		return -EINVAL;

	offset = offset_in_page(addr);
	count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
	pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);

	err = get_user_pages_fast(addr, count, 1, pages);
	if (err < count) {
		count = err;
		err = -EFAULT;
		goto put_pages;
	}

	sg = kcalloc(count, sizeof(*sg), GFP_KERNEL);
	sg_init_table(sg, count);
	sg_set_page(&sg[0], pages[0], PAGE_SIZE - offset, offset);
	length -= (PAGE_SIZE - offset);
	for (i = 1; i < count; i++) {
		sg_set_page(&sg[i], pages[i], min_t(int, length, PAGE_SIZE), 0);
		length -= PAGE_SIZE;
	}

	err = -ENOMEM;
	nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
912 913
	if (!nents)
		goto put_pages;
M
915 916 917
	kfree(pages);
	*sgp = sg;
	return nents;
M
919 920 921 922 923 924
 put_pages:
	for (i = 0; i < count; i++)
		put_page(pages[i]);
	kfree(pages);
	return err;
}

static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
				unsigned long addr, int length,
				struct scatterlist *sg, int nents)
{
	int i, count;

	count = DIV_ROUND_UP(offset_in_page(addr) + length, PAGE_SIZE);
	dma_unmap_sg(&dev->pci_dev->dev, sg, nents,
			write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

	for (i = 0; i < count; i++)
		put_page(sg_page(&sg[i]));
}

static int nvme_submit_user_admin_command(struct nvme_dev *dev,
					unsigned long addr, unsigned length,
					struct nvme_command *cmd)
{
	int err, nents;
	struct scatterlist *sg;
	struct nvme_prps *prps;

	nents = nvme_map_user_pages(dev, 0, addr, length, &sg);
	if (nents < 0)
		return nents;
	prps = nvme_setup_prps(dev, &cmd->common, sg, length);
	err = nvme_submit_admin_cmd(dev, cmd, NULL);
	nvme_unmap_user_pages(dev, 0, addr, length, sg, nents);
	nvme_free_prps(dev, prps);
	return err ? -EIO : 0;
}

static int nvme_identify(struct nvme_ns *ns, unsigned long addr, int cns)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cns ? 0 : cpu_to_le32(ns->ns_id);
	c.identify.cns = cpu_to_le32(cns);

	return nvme_submit_user_admin_command(ns->dev, addr, 4096, &c);
}

static int nvme_get_range_type(struct nvme_ns *ns, unsigned long addr)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(ns->ns_id);
	c.features.fid = cpu_to_le32(NVME_FEAT_LBA_RANGE);

	return nvme_submit_user_admin_command(ns->dev, addr, 4096, &c);
}

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_queue *nvmeq;
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length;
	u32 result;
	int nents, status;
	struct scatterlist *sg;
	struct nvme_prps *prps;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	length = io.nblocks << io.block_shift;
	nents = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length, &sg);
	if (nents < 0)
		return nents;

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(io.nsid);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks - 1);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le16(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);	/* XXX: endian? */
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);
	/* XXX: metadata */
	prps = nvme_setup_prps(dev, &c.common, sg, length);

	nvmeq = get_nvmeq(ns);
	/* Since nvme_submit_sync_cmd sleeps, we can't keep preemption
	 * disabled.  We may be preempted at any point, and be rescheduled
	 * to a different CPU.  That will cause cacheline bouncing, but no
	 * additional races since q_lock already protects against other CPUs.
	 */
	put_nvmeq(nvmeq);
	status = nvme_submit_sync_cmd(nvmeq, &c, &result, IO_TIMEOUT);

	nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, sg, nents);
	nvme_free_prps(dev, prps);
	put_user(result, &uio->result);
	return status;
}

static int nvme_download_firmware(struct nvme_ns *ns,
						struct nvme_dlfw __user *udlfw)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_dlfw dlfw;
	struct nvme_command c;
	int nents, status;
	struct scatterlist *sg;
	struct nvme_prps *prps;

	if (copy_from_user(&dlfw, udlfw, sizeof(dlfw)))
		return -EFAULT;
	if (dlfw.length >= (1 << 30))
		return -EINVAL;

	nents = nvme_map_user_pages(dev, 1, dlfw.addr, dlfw.length * 4, &sg);
	if (nents < 0)
		return nents;

	memset(&c, 0, sizeof(c));
	c.dlfw.opcode = nvme_admin_download_fw;
	c.dlfw.numd = cpu_to_le32(dlfw.length);
	c.dlfw.offset = cpu_to_le32(dlfw.offset);
	prps = nvme_setup_prps(dev, &c.common, sg, dlfw.length * 4);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	nvme_unmap_user_pages(dev, 1, dlfw.addr, dlfw.length * 4, sg, nents);
	nvme_free_prps(dev, prps);
	return status;
}

static int nvme_activate_firmware(struct nvme_ns *ns, unsigned long arg)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_activate_fw;
	c.common.rsvd10[0] = cpu_to_le32(arg);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
							unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_IDENTIFY_NS:
		return nvme_identify(ns, arg, 0);
	case NVME_IOCTL_IDENTIFY_CTRL:
		return nvme_identify(ns, arg, 1);
	case NVME_IOCTL_GET_RANGE_TYPE:
		return nvme_get_range_type(ns, arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
	case NVME_IOCTL_DOWNLOAD_FW:
		return nvme_download_firmware(ns, (void __user *)arg);
	case NVME_IOCTL_ACTIVATE_FW:
		return nvme_activate_firmware(ns, arg);
	default:
		return -ENOTTY;
	}
}

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
};

static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int index,
			struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	int lbaf;

	if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
		return NULL;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;
	ns->queue = blk_alloc_queue(GFP_KERNEL);
	if (!ns->queue)
		goto out_free_ns;
	ns->queue->queue_flags = QUEUE_FLAG_DEFAULT | QUEUE_FLAG_NOMERGES |
				QUEUE_FLAG_NONROT | QUEUE_FLAG_DISCARD;
	blk_queue_make_request(ns->queue, nvme_make_request);
	ns->dev = dev;
	ns->queue->queuedata = ns;

	disk = alloc_disk(NVME_MINORS);
	if (!disk)
		goto out_free_queue;
	ns->ns_id = index;
	ns->disk = disk;
	lbaf = id->flbas & 0xf;
	ns->lba_shift = id->lbaf[lbaf].ds;

	disk->major = nvme_major;
	disk->minors = NVME_MINORS;
	disk->first_minor = NVME_MINORS * index;
	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->driverfs_dev = &dev->pci_dev->dev;
	sprintf(disk->disk_name, "nvme%dn%d", dev->instance, index);
	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	return ns;

 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_free_ns:
	kfree(ns);
	return NULL;
}

static void nvme_ns_free(struct nvme_ns *ns)
{
	put_disk(ns->disk);
	blk_cleanup_queue(ns->queue);
	kfree(ns);
}

static int set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	struct nvme_command c;
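	/*
	 * The Number of Queues feature takes zero-based counts of submission
	 * and completion queues in the low and high 16 bits respectively.
	 */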
	u32 q_count = (count - 1) | ((count - 1) << 16);

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.fid = cpu_to_le32(NVME_FEAT_NUM_QUEUES);
	c.features.dword11 = cpu_to_le32(q_count);

	status = nvme_submit_admin_cmd(dev, &c, &result);
	if (status)
		return -EIO;
	return min(result & 0xffff, result >> 16) + 1;
}

static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
{
	int result, cpu, i, nr_queues;

	nr_queues = num_online_cpus();
	result = set_queue_count(dev, nr_queues);
	if (result < 0)
		return result;
	if (result < nr_queues)
		nr_queues = result;

	/* Deregister the admin queue's interrupt */
	free_irq(dev->entry[0].vector, dev->queues[0]);

	for (i = 0; i < nr_queues; i++)
		dev->entry[i].entry = i;
	for (;;) {
		result = pci_enable_msix(dev->pci_dev, dev->entry, nr_queues);
		if (result == 0) {
			break;
		} else if (result > 0) {
			nr_queues = result;
			continue;
		} else {
			nr_queues = 1;
			break;
		}
	}

	result = queue_request_irq(dev, dev->queues[0], "nvme admin");
	/* XXX: handle failure here */

	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < nr_queues; i++) {
		irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
	}

	for (i = 0; i < nr_queues; i++) {
		dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
							NVME_Q_DEPTH, i);
		if (!dev->queues[i + 1])
			return -ENOMEM;
		dev->queue_count++;
	}

	return 0;
}

static void nvme_free_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->queue_count - 1; i >= 0; i--)
		nvme_free_queue(dev, i);
}

static int __devinit nvme_dev_add(struct nvme_dev *dev)
{
	int res, nn, i;
	struct nvme_ns *ns, *next;
	struct nvme_id_ctrl *ctrl;
	void *id;
	dma_addr_t dma_addr;
	struct nvme_command cid, crt;

	res = nvme_setup_io_queues(dev);
	if (res)
		return res;

	/* XXX: Switch to a SG list once prp2 works */
	id = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
								GFP_KERNEL);
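	/*
	 * The first 4k of this buffer receives identify data; the second 4k
	 * receives the LBA range type data fetched for each namespace.
	 */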

	memset(&cid, 0, sizeof(cid));
	cid.identify.opcode = nvme_admin_identify;
	cid.identify.nsid = 0;
	cid.identify.prp1 = cpu_to_le64(dma_addr);
	cid.identify.cns = cpu_to_le32(1);

	res = nvme_submit_admin_cmd(dev, &cid, NULL);
	if (res) {
		res = -EIO;
		goto out_free;
	}

	ctrl = id;
	nn = le32_to_cpup(&ctrl->nn);
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));

	cid.identify.cns = 0;
	memset(&crt, 0, sizeof(crt));
	crt.features.opcode = nvme_admin_get_features;
	crt.features.prp1 = cpu_to_le64(dma_addr + 4096);
	crt.features.fid = cpu_to_le32(NVME_FEAT_LBA_RANGE);

	for (i = 0; i < nn; i++) {
		cid.identify.nsid = cpu_to_le32(i);
		res = nvme_submit_admin_cmd(dev, &cid, NULL);
		if (res)
			continue;

		if (((struct nvme_id_ns *)id)->ncap == 0)
			continue;

		crt.features.nsid = cpu_to_le32(i);
		res = nvme_submit_admin_cmd(dev, &crt, NULL);
		if (res)
			continue;

		ns = nvme_alloc_ns(dev, i, id, id + 4096);
		if (ns)
			list_add_tail(&ns->list, &dev->namespaces);
	}
	list_for_each_entry(ns, &dev->namespaces, list)
		add_disk(ns->disk);

	dma_free_coherent(&dev->pci_dev->dev, 8192, id, dma_addr);
	return 0;

 out_free:
	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		nvme_ns_free(ns);
	}

	dma_free_coherent(&dev->pci_dev->dev, 8192, id, dma_addr);
	return res;
}

static int nvme_dev_remove(struct nvme_dev *dev)
{
	struct nvme_ns *ns, *next;

	/* TODO: wait all I/O finished or cancel them */

	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		del_gendisk(ns->disk);
		nvme_ns_free(ns);
	}

	nvme_free_queues(dev);

	return 0;
}

static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
	struct device *dmadev = &dev->pci_dev->dev;
	dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
						PAGE_SIZE, PAGE_SIZE, 0);
	if (!dev->prp_page_pool)
		return -ENOMEM;

	/* Optimisation for I/Os between 4k and 128k */
	dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
						256, 256, 0);
	if (!dev->prp_small_pool) {
		dma_pool_destroy(dev->prp_page_pool);
		return -ENOMEM;
	}
	return 0;
}

static void nvme_release_prp_pools(struct nvme_dev *dev)
{
	dma_pool_destroy(dev->prp_page_pool);
	dma_pool_destroy(dev->prp_small_pool);
}

/* XXX: Use an ida or something to let remove / add work correctly */
static void nvme_set_instance(struct nvme_dev *dev)
{
	static int instance;
	dev->instance = instance++;
}

static void nvme_release_instance(struct nvme_dev *dev)
{
}

static int __devinit nvme_probe(struct pci_dev *pdev,
						const struct pci_device_id *id)
{
	int bars, result = -ENOMEM;
	struct nvme_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
								GFP_KERNEL);
	if (!dev->entry)
		goto free;
	dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
								GFP_KERNEL);
	if (!dev->queues)
		goto free;

	if (pci_enable_device_mem(pdev))
		goto free;
	pci_set_master(pdev);
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (pci_request_selected_regions(pdev, bars, "nvme"))
		goto disable;

	INIT_LIST_HEAD(&dev->namespaces);
	dev->pci_dev = pdev;
	pci_set_drvdata(pdev, dev);
	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	nvme_set_instance(dev);
	dev->entry[0].vector = pdev->irq;

	result = nvme_setup_prp_pools(dev);
	if (result)
		goto disable_msix;

	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
	if (!dev->bar) {
		result = -ENOMEM;
		goto disable_msix;
	}

	result = nvme_configure_admin_queue(dev);
	if (result)
		goto unmap;
	dev->queue_count++;

	result = nvme_dev_add(dev);
	if (result)
		goto delete;
	return 0;

 delete:
	nvme_free_queues(dev);
 unmap:
	iounmap(dev->bar);
 disable_msix:
	pci_disable_msix(pdev);
	nvme_release_instance(dev);
	nvme_release_prp_pools(dev);
 disable:
	pci_disable_device(pdev);
	pci_release_regions(pdev);
 free:
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
	return result;
}

static void __devexit nvme_remove(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	nvme_dev_remove(dev);
	pci_disable_msix(pdev);
	iounmap(dev->bar);
	nvme_release_instance(dev);
M
1437
	pci_disable_device(pdev);
M
M
	kfree(dev->entry);
	kfree(dev);
}

/* These functions are yet to be implemented */
#define nvme_error_detected NULL
#define nvme_dump_registers NULL
#define nvme_link_reset NULL
#define nvme_slot_reset NULL
#define nvme_error_resume NULL
#define nvme_suspend NULL
#define nvme_resume NULL

static struct pci_error_handlers nvme_err_handler = {
	.error_detected	= nvme_error_detected,
	.mmio_enabled	= nvme_dump_registers,
	.link_reset	= nvme_link_reset,
	.slot_reset	= nvme_slot_reset,
	.resume		= nvme_error_resume,
};

/* Move to pci_ids.h later */
#define PCI_CLASS_STORAGE_EXPRESS	0x010802

static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);

static struct pci_driver nvme_driver = {
	.name		= "nvme",
	.id_table	= nvme_id_table,
	.probe		= nvme_probe,
	.remove		= __devexit_p(nvme_remove),
	.suspend	= nvme_suspend,
	.resume		= nvme_resume,
	.err_handler	= &nvme_err_handler,
};

static int __init nvme_init(void)
{
	int result;

	nvme_major = register_blkdev(nvme_major, "nvme");
	if (nvme_major <= 0)
		return -EBUSY;

	result = pci_register_driver(&nvme_driver);
	if (!result)
		return 0;

	unregister_blkdev(nvme_major, "nvme");
	return result;
}

static void __exit nvme_exit(void)
{
	pci_unregister_driver(&nvme_driver);
	unregister_blkdev(nvme_major, "nvme");
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.2");
module_init(nvme_init);
module_exit(nvme_exit);