/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include "nvme.h"

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>
#include <uapi/linux/lightnvm.h>

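/* Vendor-specific admin opcodes, as defined by the Open-Channel SSD 1.2 spec. */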
enum nvme_nvm_admin_opcode {
	nvme_nvm_admin_identity		= 0xe2,
	nvme_nvm_admin_get_bb_tbl	= 0xf2,
	nvme_nvm_admin_set_bb_tbl	= 0xf1,
};

struct nvme_nvm_ph_rw {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2;
	__le64			metadata;
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			length;
	__le16			control;
	__le32			dsmgmt;
	__le64			resv;
};

struct nvme_nvm_identity {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le32			chnl_off;
	__u32			rsvd11[5];
};

struct nvme_nvm_getbbtbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__u32			rsvd4[4];
};

struct nvme_nvm_setbbtbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__le64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			nlb;
	__u8			value;
	__u8			rsvd3;
	__u32			rsvd4[3];
};

struct nvme_nvm_erase_blk {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			length;
	__le16			control;
	__le32			dsmgmt;
	__le64			resv;
};

struct nvme_nvm_command {
	union {
		struct nvme_common_command common;
		struct nvme_nvm_identity identity;
		struct nvme_nvm_ph_rw ph_rw;
		struct nvme_nvm_getbbtbl get_bb;
		struct nvme_nvm_setbbtbl set_bb;
		struct nvme_nvm_erase_blk erase;
	};
};

#define NVME_NVM_LP_MLC_PAIRS 886
struct nvme_nvm_lp_mlc {
	__le16			num_pairs;
	__u8			pairs[NVME_NVM_LP_MLC_PAIRS];
};

struct nvme_nvm_lp_tbl {
	__u8			id[8];
	struct nvme_nvm_lp_mlc	mlc;
};

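/*
 * Wire format of one configuration group in the 1.2 identify (geometry)
 * page, as reported by the device.
 */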
struct nvme_nvm_id_group {
	__u8			mtype;
	__u8			fmtype;
	__le16			res16;
	__u8			num_ch;
	__u8			num_lun;
	__u8			num_pln;
	__u8			rsvd1;
	__le16			num_chk;
	__le16			num_pg;
	__le16			fpg_sz;
	__le16			csecs;
	__le16			sos;
	__le16			rsvd2;
	__le32			trdt;
	__le32			trdm;
	__le32			tprt;
	__le32			tprm;
	__le32			tbet;
	__le32			tbem;
	__le32			mpos;
	__le32			mccap;
	__le16			cpar;
	__u8			reserved[10];
	struct nvme_nvm_lp_tbl lptbl;
} __packed;

struct nvme_nvm_addr_format {
	__u8			ch_offset;
	__u8			ch_len;
	__u8			lun_offset;
	__u8			lun_len;
	__u8			pln_offset;
	__u8			pln_len;
	__u8			blk_offset;
	__u8			blk_len;
	__u8			pg_offset;
	__u8			pg_len;
	__u8			sect_offset;
	__u8			sect_len;
	__u8			res[4];
} __packed;

struct nvme_nvm_id {
	__u8			ver_id;
	__u8			vmnt;
	__u8			cgrps;
	__u8			res;
	__le32			cap;
	__le32			dom;
	struct nvme_nvm_addr_format ppaf;
	__u8			resv[228];
	struct nvme_nvm_id_group groups[4];
} __packed;

struct nvme_nvm_bb_tbl {
	__u8	tblid[4];
	__le16	verid;
	__le16	revid;
	__le32	rsvd1;
	__le32	tblks;
	__le32	tfact;
	__le32	tgrown;
	__le32	tdresv;
	__le32	thresv;
	__le32	rsvd2[8];
	__u8	blk[0];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 16);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
}

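/*
 * Translate the first (and only supported) configuration group of the
 * identify page into the generic geometry handed to the lightnvm core.
 */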
static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
{
	struct nvme_nvm_id_group *src;
	struct nvm_id_group *grp;
	int sec_per_pg, sec_per_pl, pg_per_blk;

	if (nvme_nvm_id->cgrps != 1)
		return -EINVAL;

	src = &nvme_nvm_id->groups[0];
	grp = &nvm_id->grp;

	grp->mtype = src->mtype;
	grp->fmtype = src->fmtype;

	grp->num_ch = src->num_ch;
	grp->num_lun = src->num_lun;

	grp->num_chk = le16_to_cpu(src->num_chk);
	grp->csecs = le16_to_cpu(src->csecs);
	grp->sos = le16_to_cpu(src->sos);

	pg_per_blk = le16_to_cpu(src->num_pg);
	sec_per_pg = le16_to_cpu(src->fpg_sz) / grp->csecs;
	sec_per_pl = sec_per_pg * src->num_pln;
	grp->clba = sec_per_pl * pg_per_blk;
	grp->ws_per_chk = pg_per_blk;

	grp->mpos = le32_to_cpu(src->mpos);
	grp->cpar = le16_to_cpu(src->cpar);
	grp->mccap = le32_to_cpu(src->mccap);

	grp->ws_opt = grp->ws_min = sec_per_pg;
	grp->ws_seq = NVM_IO_SNGL_ACCESS;

	if (grp->mpos & 0x020202) {
		grp->ws_seq = NVM_IO_DUAL_ACCESS;
		grp->ws_opt <<= 1;
	} else if (grp->mpos & 0x040404) {
		grp->ws_seq = NVM_IO_QUAD_ACCESS;
		grp->ws_opt <<= 2;
	}

	grp->trdt = le32_to_cpu(src->trdt);
	grp->trdm = le32_to_cpu(src->trdm);
	grp->tprt = le32_to_cpu(src->tprt);
	grp->tprm = le32_to_cpu(src->tprm);
	grp->tbet = le32_to_cpu(src->tbet);
	grp->tbem = le32_to_cpu(src->tbem);

	/* 1.2 compatibility */
	grp->num_pln = src->num_pln;
	grp->num_pg = le16_to_cpu(src->num_pg);
	grp->fpg_sz = le16_to_cpu(src->fpg_sz);

	return 0;
}

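/* Issue the identity admin command and unpack the result into @nvm_id. */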
static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_id *nvme_nvm_id;
	struct nvme_nvm_command c = {};
	int ret;

	c.identity.opcode = nvme_nvm_admin_identity;
	c.identity.nsid = cpu_to_le32(ns->head->ns_id);
	c.identity.chnl_off = 0;

	nvme_nvm_id = kmalloc(sizeof(struct nvme_nvm_id), GFP_KERNEL);
	if (!nvme_nvm_id)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
				nvme_nvm_id, sizeof(struct nvme_nvm_id));
	if (ret) {
		ret = -EIO;
		goto out;
	}

	nvm_id->ver_id = nvme_nvm_id->ver_id;
	nvm_id->vmnt = nvme_nvm_id->vmnt;
	nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
	nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
	memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf,
					sizeof(struct nvm_addr_format));

	ret = init_grps(nvm_id, nvme_nvm_id);
out:
	kfree(nvme_nvm_id);
	return ret;
}

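/*
 * Read and sanity-check the bad block table for the LUN addressed by
 * @ppa. On success, @blks holds one state byte per block and plane.
 */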
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
								u8 *blks)
{
	struct request_queue *q = nvmdev->q;
	struct nvm_geo *geo = &nvmdev->geo;
	struct nvme_ns *ns = q->queuedata;
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_nvm_command c = {};
	struct nvme_nvm_bb_tbl *bb_tbl;
	int nr_blks = geo->nr_chks * geo->plane_mode;
	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
	int ret = 0;

	c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
	c.get_bb.nsid = cpu_to_le32(ns->head->ns_id);
	c.get_bb.spba = cpu_to_le64(ppa.ppa);

	bb_tbl = kzalloc(tblsz, GFP_KERNEL);
	if (!bb_tbl)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
								bb_tbl, tblsz);
	if (ret) {
		dev_err(ctrl->device, "get bad block table failed (%d)\n", ret);
		ret = -EIO;
		goto out;
	}

	if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
		bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
		dev_err(ctrl->device, "bbt format mismatch\n");
		ret = -EINVAL;
		goto out;
	}

	if (le16_to_cpu(bb_tbl->verid) != 1) {
		ret = -EINVAL;
		dev_err(ctrl->device, "bbt version not supported\n");
		goto out;
	}

	if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
		ret = -EINVAL;
	dev_err(ctrl->device,
			"bbt unexpected blocks returned (%u!=%u)",
				le32_to_cpu(bb_tbl->tblks), nr_blks);
		goto out;
	}

	memcpy(blks, bb_tbl->blk, geo->nr_chks * geo->plane_mode);
out:
	kfree(bb_tbl);
	return ret;
}

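/* Mark the @nr_ppas blocks starting at @ppas with bad block state @type. */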
static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
							int nr_ppas, int type)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_command c = {};
	int ret = 0;

	c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
	c.set_bb.nsid = cpu_to_le32(ns->head->ns_id);
	c.set_bb.spba = cpu_to_le64(ppas->ppa);
	c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
	c.set_bb.value = type;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
								NULL, 0);
	if (ret)
		dev_err(ns->ctrl->device, "set bad block table failed (%d)\n",
									ret);
	return ret;
}

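/* Translate a generic lightnvm request into a vendor-specific NVMe command. */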
static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
				    struct nvme_nvm_command *c)
{
	c->ph_rw.opcode = rqd->opcode;
	c->ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
	c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
	c->ph_rw.control = cpu_to_le16(rqd->flags);
	c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
}

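/*
 * Async completion handler: propagate the per-ppa completion status and
 * error to the lightnvm core, then free the command and the request.
 */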
static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
{
	struct nvm_rq *rqd = rq->end_io_data;

	rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
	rqd->error = nvme_req(rq)->status;
	nvm_end_io(rqd);

	kfree(nvme_req(rq)->cmd);
	blk_mq_free_request(rq);
}

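/*
 * Build a passthrough request for @rqd. Data transfer, if any, uses the
 * bio already attached to the request descriptor.
 */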
static struct request *nvme_nvm_alloc_request(struct request_queue *q,
					      struct nvm_rq *rqd,
					      struct nvme_nvm_command *cmd)
{
	struct nvme_ns *ns = q->queuedata;
	struct request *rq;

	nvme_nvm_rqtocmd(rqd, ns, cmd);

	rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
	if (IS_ERR(rq))
		return rq;

	rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;

	if (rqd->bio) {
		blk_init_request_from_bio(rq, rqd->bio);
	} else {
		rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
		rq->__data_len = 0;
	}

	return rq;
}

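/* Submit an I/O asynchronously; completion arrives in nvme_nvm_end_io(). */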
static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct nvme_nvm_command *cmd;
	struct request *rq;

	cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	rq = nvme_nvm_alloc_request(q, rqd, cmd);
	if (IS_ERR(rq)) {
		kfree(cmd);
		return PTR_ERR(rq);
	}

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

	return 0;
}

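/* Synchronous variant: the command lives on the stack and we wait inline. */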
static int nvme_nvm_submit_io_sync(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct request *rq;
	struct nvme_nvm_command cmd;
	int ret = 0;

	memset(&cmd, 0, sizeof(struct nvme_nvm_command));

	rq = nvme_nvm_alloc_request(q, rqd, &cmd);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* I/Os can fail and the error is signaled through rqd. Callers must
	 * handle the error accordingly.
	 */
	blk_execute_rq(q, NULL, rq, 0);
	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;

	rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
	rqd->error = nvme_req(rq)->status;

	blk_mq_free_request(rq);

	return ret;
}

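/*
 * Create a PAGE_SIZE dma pool, used for the PPA lists and out-of-band
 * metadata that the vendor commands reference by physical address.
 */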
static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;

	return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
	struct dma_pool *dma_pool = pool;

	dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				    gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *addr,
							dma_addr_t dma_handler)
{
	dma_pool_free(pool, addr, dma_handler);
}

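/* Device hooks handed to the lightnvm core through nvm_register(). */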
static struct nvm_dev_ops nvme_nvm_dev_ops = {
	.identity		= nvme_nvm_identity,

	.get_bb_tbl		= nvme_nvm_get_bb_tbl,
	.set_bb_tbl		= nvme_nvm_set_bb_tbl,

	.submit_io		= nvme_nvm_submit_io,
	.submit_io_sync		= nvme_nvm_submit_io_sync,

	.create_dma_pool	= nvme_nvm_create_dma_pool,
	.destroy_dma_pool	= nvme_nvm_destroy_dma_pool,
	.dev_dma_alloc		= nvme_nvm_dev_dma_alloc,
	.dev_dma_free		= nvme_nvm_dev_dma_free,

	.max_phys_sect		= 64,
};

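/*
 * Common worker for the LightNVM ioctls: stage the user's PPA list, data
 * and metadata buffers, execute the command synchronously, and copy the
 * results back to user space.
 */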
static int nvme_nvm_submit_user_cmd(struct request_queue *q,
				struct nvme_ns *ns,
				struct nvme_nvm_command *vcmd,
				void __user *ubuf, unsigned int bufflen,
				void __user *meta_buf, unsigned int meta_len,
				void __user *ppa_buf, unsigned int ppa_len,
				u32 *result, u64 *status, unsigned int timeout)
{
	bool write = nvme_is_write((struct nvme_command *)vcmd);
	struct nvm_dev *dev = ns->ndev;
	struct gendisk *disk = ns->disk;
	struct request *rq;
	struct bio *bio = NULL;
	__le64 *ppa_list = NULL;
	dma_addr_t ppa_dma;
	__le64 *metadata = NULL;
	dma_addr_t metadata_dma;
	DECLARE_COMPLETION_ONSTACK(wait);
	int ret = 0;

	rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
			NVME_QID_ANY);
	if (IS_ERR(rq)) {
		ret = -ENOMEM;
		goto err_cmd;
	}

	rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (ppa_buf && ppa_len) {
		ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
		if (!ppa_list) {
			ret = -ENOMEM;
			goto err_rq;
		}
		if (copy_from_user(ppa_list, (void __user *)ppa_buf,
						sizeof(u64) * (ppa_len + 1))) {
			ret = -EFAULT;
			goto err_ppa;
		}
		vcmd->ph_rw.spba = cpu_to_le64(ppa_dma);
	} else {
		vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf);
	}

	if (ubuf && bufflen) {
		ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
		if (ret)
			goto err_ppa;
		bio = rq->bio;

		if (meta_buf && meta_len) {
			metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL,
								&metadata_dma);
			if (!metadata) {
				ret = -ENOMEM;
				goto err_map;
			}

			if (write) {
				if (copy_from_user(metadata,
						(void __user *)meta_buf,
						meta_len)) {
					ret = -EFAULT;
					goto err_meta;
				}
			}
			vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
		}

		bio->bi_disk = disk;
	}

	blk_execute_rq(q, NULL, rq, 0);

	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else if (nvme_req(rq)->status & 0x7ff)
		ret = -EIO;
	if (result)
		*result = nvme_req(rq)->status & 0x7ff;
	if (status)
		*status = le64_to_cpu(nvme_req(rq)->result.u64);

	if (metadata && !ret && !write) {
		if (copy_to_user(meta_buf, (void *)metadata, meta_len))
			ret = -EFAULT;
	}
err_meta:
	if (meta_buf && meta_len)
		dma_pool_free(dev->dma_pool, metadata, metadata_dma);
err_map:
	if (bio)
		blk_rq_unmap_user(bio);
err_ppa:
	if (ppa_buf && ppa_len)
		dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
err_rq:
	blk_mq_free_request(rq);
err_cmd:
	return ret;
}

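/* Handle NVME_NVM_IOCTL_SUBMIT_VIO: a vectored user I/O on this namespace. */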
static int nvme_nvm_submit_vio(struct nvme_ns *ns,
					struct nvm_user_vio __user *uvio)
{
	struct nvm_user_vio vio;
	struct nvme_nvm_command c;
	unsigned int length;
	int ret;

	if (copy_from_user(&vio, uvio, sizeof(vio)))
		return -EFAULT;
	if (vio.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.ph_rw.opcode = vio.opcode;
	c.ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
	c.ph_rw.control = cpu_to_le16(vio.control);
	c.ph_rw.length = cpu_to_le16(vio.nppas);

	length = (vio.nppas + 1) << ns->lba_shift;

	ret = nvme_nvm_submit_user_cmd(ns->queue, ns, &c,
			(void __user *)(uintptr_t)vio.addr, length,
			(void __user *)(uintptr_t)vio.metadata,
							vio.metadata_len,
			(void __user *)(uintptr_t)vio.ppa_list, vio.nppas,
			&vio.result, &vio.status, 0);

	if (ret && copy_to_user(uvio, &vio, sizeof(vio)))
		return -EFAULT;

	return ret;
}

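/*
 * Admin/IO vendor passthrough. Only the get bad block table opcode (0xf2)
 * may be issued without CAP_SYS_ADMIN.
 */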
static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
					struct nvm_passthru_vio __user *uvcmd)
{
	struct nvm_passthru_vio vcmd;
	struct nvme_nvm_command c;
	struct request_queue *q;
	unsigned int timeout = 0;
	int ret;

	if (copy_from_user(&vcmd, uvcmd, sizeof(vcmd)))
		return -EFAULT;
	if ((vcmd.opcode != 0xF2) && (!capable(CAP_SYS_ADMIN)))
		return -EACCES;
	if (vcmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = vcmd.opcode;
	c.common.nsid = cpu_to_le32(ns->head->ns_id);
	c.common.cdw2[0] = cpu_to_le32(vcmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(vcmd.cdw3);
	/* cdw11-12 */
	c.ph_rw.length = cpu_to_le16(vcmd.nppas);
	c.ph_rw.control  = cpu_to_le16(vcmd.control);
	c.common.cdw10[3] = cpu_to_le32(vcmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(vcmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(vcmd.cdw15);

	if (vcmd.timeout_ms)
		timeout = msecs_to_jiffies(vcmd.timeout_ms);

	q = admin ? ns->ctrl->admin_q : ns->queue;

	ret = nvme_nvm_submit_user_cmd(q, ns,
			(struct nvme_nvm_command *)&c,
			(void __user *)(uintptr_t)vcmd.addr, vcmd.data_len,
			(void __user *)(uintptr_t)vcmd.metadata,
							vcmd.metadata_len,
			(void __user *)(uintptr_t)vcmd.ppa_list, vcmd.nppas,
			&vcmd.result, &vcmd.status, timeout);

	if (ret && copy_to_user(uvcmd, &vcmd, sizeof(vcmd)))
		return -EFAULT;

	return ret;
}

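/*
 * ioctl entry point for the namespace block device, used by userspace
 * tools such as liblightnvm.
 */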
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NVME_NVM_IOCTL_ADMIN_VIO:
		return nvme_nvm_user_vcmd(ns, 1, (void __user *)arg);
	case NVME_NVM_IOCTL_IO_VIO:
		return nvme_nvm_user_vcmd(ns, 0, (void __user *)arg);
	case NVME_NVM_IOCTL_SUBMIT_VIO:
		return nvme_nvm_submit_vio(ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}

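/* Allocate an nvm_dev for this namespace and register it with the core. */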
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
{
	struct request_queue *q = ns->queue;
	struct nvm_dev *dev;

	_nvme_nvm_check_size();

	dev = nvm_alloc_dev(node);
	if (!dev)
		return -ENOMEM;

	dev->q = q;
	memcpy(dev->name, disk_name, DISK_NAME_LEN);
	dev->ops = &nvme_nvm_dev_ops;
	dev->private_data = ns;
	ns->ndev = dev;

	return nvm_register(dev);
}

void nvme_nvm_unregister(struct nvme_ns *ns)
{
	nvm_unregister(ns->ndev);
}

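/*
 * Backing show() for the read-only identity and geometry attributes
 * exposed under the disk's "lightnvm" sysfs group.
 */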
static ssize_t nvm_dev_attr_show(struct device *dev,
				 struct device_attribute *dattr, char *page)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
	struct nvm_dev *ndev = ns->ndev;
	struct nvm_id *id;
	struct nvm_id_group *grp;
	struct attribute *attr;

	if (!ndev)
		return 0;

	id = &ndev->identity;
	grp = &id->grp;
	attr = &dattr->attr;

	if (strcmp(attr->name, "version") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id);
	} else if (strcmp(attr->name, "vendor_opcode") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt);
	} else if (strcmp(attr->name, "capabilities") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
	} else if (strcmp(attr->name, "device_mode") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
	/* kept for compatibility */
	} else if (strcmp(attr->name, "media_manager") == 0) {
		return scnprintf(page, PAGE_SIZE, "%s\n", "gennvm");
	} else if (strcmp(attr->name, "ppa_format") == 0) {
		return scnprintf(page, PAGE_SIZE,
			"0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			id->ppaf.ch_offset, id->ppaf.ch_len,
			id->ppaf.lun_offset, id->ppaf.lun_len,
			id->ppaf.pln_offset, id->ppaf.pln_len,
			id->ppaf.blk_offset, id->ppaf.blk_len,
			id->ppaf.pg_offset, id->ppaf.pg_len,
			id->ppaf.sect_offset, id->ppaf.sect_len);
	} else if (strcmp(attr->name, "media_type") == 0) {	/* u8 */
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->mtype);
	} else if (strcmp(attr->name, "flash_media_type") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->fmtype);
	} else if (strcmp(attr->name, "num_channels") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_ch);
	} else if (strcmp(attr->name, "num_luns") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_lun);
	} else if (strcmp(attr->name, "num_planes") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
	} else if (strcmp(attr->name, "num_blocks") == 0) {	/* u16 */
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_chk);
	} else if (strcmp(attr->name, "num_pages") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
	} else if (strcmp(attr->name, "page_size") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->fpg_sz);
	} else if (strcmp(attr->name, "hw_sector_size") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->csecs);
	} else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->sos);
	} else if (strcmp(attr->name, "read_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdt);
	} else if (strcmp(attr->name, "read_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdm);
	} else if (strcmp(attr->name, "prog_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprt);
	} else if (strcmp(attr->name, "prog_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprm);
	} else if (strcmp(attr->name, "erase_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbet);
	} else if (strcmp(attr->name, "erase_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbem);
	} else if (strcmp(attr->name, "multiplane_modes") == 0) {
		return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mpos);
	} else if (strcmp(attr->name, "media_capabilities") == 0) {
		return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mccap);
	} else if (strcmp(attr->name, "max_phys_secs") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n",
				ndev->ops->max_phys_sect);
	} else {
		return scnprintf(page,
				 PAGE_SIZE,
				 "Unhandled attr(%s) in `nvm_dev_attr_show`\n",
				 attr->name);
	}
}

#define NVM_DEV_ATTR_RO(_name)						\
	DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)

static NVM_DEV_ATTR_RO(version);
static NVM_DEV_ATTR_RO(vendor_opcode);
static NVM_DEV_ATTR_RO(capabilities);
static NVM_DEV_ATTR_RO(device_mode);
static NVM_DEV_ATTR_RO(ppa_format);
static NVM_DEV_ATTR_RO(media_manager);

static NVM_DEV_ATTR_RO(media_type);
static NVM_DEV_ATTR_RO(flash_media_type);
static NVM_DEV_ATTR_RO(num_channels);
static NVM_DEV_ATTR_RO(num_luns);
static NVM_DEV_ATTR_RO(num_planes);
static NVM_DEV_ATTR_RO(num_blocks);
static NVM_DEV_ATTR_RO(num_pages);
static NVM_DEV_ATTR_RO(page_size);
static NVM_DEV_ATTR_RO(hw_sector_size);
static NVM_DEV_ATTR_RO(oob_sector_size);
static NVM_DEV_ATTR_RO(read_typ);
static NVM_DEV_ATTR_RO(read_max);
static NVM_DEV_ATTR_RO(prog_typ);
static NVM_DEV_ATTR_RO(prog_max);
static NVM_DEV_ATTR_RO(erase_typ);
static NVM_DEV_ATTR_RO(erase_max);
static NVM_DEV_ATTR_RO(multiplane_modes);
static NVM_DEV_ATTR_RO(media_capabilities);
static NVM_DEV_ATTR_RO(max_phys_secs);

static struct attribute *nvm_dev_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_vendor_opcode.attr,
	&dev_attr_capabilities.attr,
	&dev_attr_device_mode.attr,
	&dev_attr_media_manager.attr,

	&dev_attr_ppa_format.attr,
	&dev_attr_media_type.attr,
	&dev_attr_flash_media_type.attr,
	&dev_attr_num_channels.attr,
	&dev_attr_num_luns.attr,
	&dev_attr_num_planes.attr,
	&dev_attr_num_blocks.attr,
	&dev_attr_num_pages.attr,
	&dev_attr_page_size.attr,
	&dev_attr_hw_sector_size.attr,
	&dev_attr_oob_sector_size.attr,
	&dev_attr_read_typ.attr,
	&dev_attr_read_max.attr,
	&dev_attr_prog_typ.attr,
	&dev_attr_prog_max.attr,
	&dev_attr_erase_typ.attr,
	&dev_attr_erase_max.attr,
	&dev_attr_multiplane_modes.attr,
	&dev_attr_media_capabilities.attr,
	&dev_attr_max_phys_secs.attr,
	NULL,
};

static const struct attribute_group nvm_dev_attr_group = {
	.name		= "lightnvm",
	.attrs		= nvm_dev_attrs,
};

int nvme_nvm_register_sysfs(struct nvme_ns *ns)
{
	return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
					&nvm_dev_attr_group);
}

void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
{
	sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
					&nvm_dev_attr_group);
}