/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include "nvme.h"

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>
#include <uapi/linux/lightnvm.h>

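/* Vendor-specific admin opcodes defined by the open-channel SSD (LightNVM) spec */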
enum nvme_nvm_admin_opcode {
	nvme_nvm_admin_identity		= 0xe2,
	nvme_nvm_admin_get_bb_tbl	= 0xf2,
	nvme_nvm_admin_set_bb_tbl	= 0xf1,
};

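/* Physical page-addressed read/write command; spba holds the starting PPA */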
struct nvme_nvm_ph_rw {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2;
	__le64			metadata;
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			length;
	__le16			control;
	__le32			dsmgmt;
	__le64			resv;
};

struct nvme_nvm_identity {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__u32			rsvd11[6];
};

struct nvme_nvm_getbbtbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__u32			rsvd4[4];
};

struct nvme_nvm_setbbtbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__le64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			nlb;
	__u8			value;
	__u8			rsvd3;
	__u32			rsvd4[3];
};

struct nvme_nvm_erase_blk {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			length;
	__le16			control;
	__le32			dsmgmt;
	__le64			resv;
};

struct nvme_nvm_command {
	union {
		struct nvme_common_command common;
		struct nvme_nvm_identity identity;
		struct nvme_nvm_ph_rw ph_rw;
		struct nvme_nvm_getbbtbl get_bb;
		struct nvme_nvm_setbbtbl set_bb;
		struct nvme_nvm_erase_blk erase;
	};
};

#define NVME_NVM_LP_MLC_PAIRS 886
struct nvme_nvm_lp_mlc {
	__le16			num_pairs;
	__u8			pairs[NVME_NVM_LP_MLC_PAIRS];
};

struct nvme_nvm_lp_tbl {
	__u8			id[8];
	struct nvme_nvm_lp_mlc	mlc;
};

struct nvme_nvm_id_group {
	__u8			mtype;
	__u8			fmtype;
	__le16			res16;
	__u8			num_ch;
	__u8			num_lun;
	__u8			num_pln;
	__u8			rsvd1;
	__le16			num_chk;
	__le16			num_pg;
	__le16			fpg_sz;
	__le16			csecs;
	__le16			sos;
	__le16			rsvd2;
	__le32			trdt;
	__le32			trdm;
	__le32			tprt;
	__le32			tprm;
	__le32			tbet;
	__le32			tbem;
	__le32			mpos;
	__le32			mccap;
	__le16			cpar;
	__u8			reserved[10];
	struct nvme_nvm_lp_tbl lptbl;
} __packed;

struct nvme_nvm_addr_format {
	__u8			ch_offset;
	__u8			ch_len;
	__u8			lun_offset;
	__u8			lun_len;
	__u8			pln_offset;
	__u8			pln_len;
	__u8			blk_offset;
	__u8			blk_len;
	__u8			pg_offset;
	__u8			pg_len;
	__u8			sect_offset;
	__u8			sect_len;
	__u8			res[4];
} __packed;

struct nvme_nvm_id {
	__u8			ver_id;
	__u8			vmnt;
	__u8			cgrps;
	__u8			res;
	__le32			cap;
	__le32			dom;
	struct nvme_nvm_addr_format ppaf;
	__u8			resv[228];
	struct nvme_nvm_id_group groups[4];
} __packed;

struct nvme_nvm_bb_tbl {
	__u8	tblid[4];
	__le16	verid;
	__le16	revid;
	__le32	rvsd1;
	__le32	tblks;
	__le32	tfact;
	__le32	tgrown;
	__le32	tdresv;
	__le32	thresv;
	__le32	rsvd2[8];
	__u8	blk[0];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 16);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
}

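/*
 * Translate the configuration group of the identify structure into the
 * generic geometry used by the LightNVM core. Only a single group is
 * supported.
 */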
static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
{
	struct nvme_nvm_id_group *src;
	struct nvm_id_group *grp;
	int sec_per_pg, sec_per_pl, pg_per_blk;

	if (nvme_nvm_id->cgrps != 1)
		return -EINVAL;

	src = &nvme_nvm_id->groups[0];
	grp = &nvm_id->grp;

	grp->mtype = src->mtype;
	grp->fmtype = src->fmtype;

	grp->num_ch = src->num_ch;
	grp->num_lun = src->num_lun;

	grp->num_chk = le16_to_cpu(src->num_chk);
	grp->csecs = le16_to_cpu(src->csecs);
	grp->sos = le16_to_cpu(src->sos);

	pg_per_blk = le16_to_cpu(src->num_pg);
	sec_per_pg = le16_to_cpu(src->fpg_sz) / grp->csecs;
	sec_per_pl = sec_per_pg * src->num_pln;
	grp->clba = sec_per_pl * pg_per_blk;
	grp->ws_per_chk = pg_per_blk;

	grp->mpos = le32_to_cpu(src->mpos);
	grp->cpar = le16_to_cpu(src->cpar);
	grp->mccap = le32_to_cpu(src->mccap);

	grp->ws_opt = grp->ws_min = sec_per_pg;
	grp->ws_seq = NVM_IO_SNGL_ACCESS;

	if (grp->mpos & 0x020202) {
		grp->ws_seq = NVM_IO_DUAL_ACCESS;
		grp->ws_opt <<= 1;
	} else if (grp->mpos & 0x040404) {
		grp->ws_seq = NVM_IO_QUAD_ACCESS;
		grp->ws_opt <<= 2;
	}

	grp->trdt = le32_to_cpu(src->trdt);
	grp->trdm = le32_to_cpu(src->trdm);
	grp->tprt = le32_to_cpu(src->tprt);
	grp->tprm = le32_to_cpu(src->tprm);
	grp->tbet = le32_to_cpu(src->tbet);
	grp->tbem = le32_to_cpu(src->tbem);

	/* 1.2 compatibility */
	grp->num_pln = src->num_pln;
	grp->num_pg = le16_to_cpu(src->num_pg);
	grp->fpg_sz = le16_to_cpu(src->fpg_sz);

	return 0;
}

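/* Issue the device identity admin command and fill in the generic nvm_id */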
static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_id *nvme_nvm_id;
	struct nvme_nvm_command c = {};
	int ret;

	c.identity.opcode = nvme_nvm_admin_identity;
	c.identity.nsid = cpu_to_le32(ns->head->ns_id);

	nvme_nvm_id = kmalloc(sizeof(struct nvme_nvm_id), GFP_KERNEL);
	if (!nvme_nvm_id)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
				nvme_nvm_id, sizeof(struct nvme_nvm_id));
	if (ret) {
		ret = -EIO;
		goto out;
	}

	nvm_id->ver_id = nvme_nvm_id->ver_id;
	nvm_id->vmnt = nvme_nvm_id->vmnt;
	nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
	nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
	memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf,
					sizeof(struct nvm_addr_format));

	ret = init_grps(nvm_id, nvme_nvm_id);
out:
	kfree(nvme_nvm_id);
	return ret;
}

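/*
 * Read the bad block table for the LUN addressed by @ppa and copy the
 * per-block state bytes into @blks after validating the table header.
 */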
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
								u8 *blks)
{
	struct request_queue *q = nvmdev->q;
	struct nvm_geo *geo = &nvmdev->geo;
	struct nvme_ns *ns = q->queuedata;
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_nvm_command c = {};
	struct nvme_nvm_bb_tbl *bb_tbl;
	int nr_blks = geo->nr_chks * geo->plane_mode;
	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
	int ret = 0;

	c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
	c.get_bb.nsid = cpu_to_le32(ns->head->ns_id);
	c.get_bb.spba = cpu_to_le64(ppa.ppa);

	bb_tbl = kzalloc(tblsz, GFP_KERNEL);
	if (!bb_tbl)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
								bb_tbl, tblsz);
	if (ret) {
		dev_err(ctrl->device, "get bad block table failed (%d)\n", ret);
		ret = -EIO;
		goto out;
	}

	if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
		bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
		dev_err(ctrl->device, "bbt format mismatch\n");
		ret = -EINVAL;
		goto out;
	}

	if (le16_to_cpu(bb_tbl->verid) != 1) {
		ret = -EINVAL;
		dev_err(ctrl->device, "bbt version not supported\n");
		goto out;
	}

	if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
		ret = -EINVAL;
		dev_err(ctrl->device,
				"bbt unexpected blocks returned (%u!=%u)",
				le32_to_cpu(bb_tbl->tblks), nr_blks);
		goto out;
	}

	memcpy(blks, bb_tbl->blk, geo->nr_chks * geo->plane_mode);
out:
	kfree(bb_tbl);
	return ret;
}

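/* Update the bad block state of @nr_ppas blocks starting at @ppas to @type */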
static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
							int nr_ppas, int type)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_command c = {};
	int ret = 0;

	c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
	c.set_bb.nsid = cpu_to_le32(ns->head->ns_id);
	c.set_bb.spba = cpu_to_le64(ppas->ppa);
	c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
	c.set_bb.value = type;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
								NULL, 0);
	if (ret)
		dev_err(ns->ctrl->device, "set bad block table failed (%d)\n",
									ret);
	return ret;
}

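/* Translate a generic nvm_rq into the vendor-specific ph_rw command format */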
static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
				    struct nvme_nvm_command *c)
{
	c->ph_rw.opcode = rqd->opcode;
	c->ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
	c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
	c->ph_rw.control = cpu_to_le16(rqd->flags);
	c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
}

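/* Completion handler: propagate per-PPA status and NVMe status into the nvm_rq */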
static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
{
	struct nvm_rq *rqd = rq->end_io_data;

	rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
	rqd->error = nvme_req(rq)->status;
	nvm_end_io(rqd);

	kfree(nvme_req(rq)->cmd);
	blk_mq_free_request(rq);
}

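/* Build a request from @rqd; commands without a bio carry no data payload */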
static struct request *nvme_nvm_alloc_request(struct request_queue *q,
					      struct nvm_rq *rqd,
					      struct nvme_nvm_command *cmd)
{
	struct nvme_ns *ns = q->queuedata;
	struct request *rq;

	nvme_nvm_rqtocmd(rqd, ns, cmd);

	rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
	if (IS_ERR(rq))
		return rq;

	rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;

	if (rqd->bio) {
		blk_init_request_from_bio(rq, rqd->bio);
	} else {
		rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
		rq->__data_len = 0;
	}

	return rq;
}

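/*
 * Asynchronous submission. The command buffer is allocated here and freed
 * in nvme_nvm_end_io() once the request completes.
 */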
static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct nvme_nvm_command *cmd;
	struct request *rq;

	cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	rq = nvme_nvm_alloc_request(q, rqd, cmd);
	if (IS_ERR(rq)) {
		kfree(cmd);
		return PTR_ERR(rq);
	}

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

	return 0;
}

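/* Synchronous variant: the command lives on the stack and we wait for completion */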
static int nvme_nvm_submit_io_sync(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct request *rq;
	struct nvme_nvm_command cmd;
	int ret = 0;

	memset(&cmd, 0, sizeof(struct nvme_nvm_command));

	rq = nvme_nvm_alloc_request(q, rqd, &cmd);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* I/Os can fail and the error is signaled through rqd. Callers must
	 * handle the error accordingly.
	 */
	blk_execute_rq(q, NULL, rq, 0);
	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;

	rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
	rqd->error = nvme_req(rq)->status;

	blk_mq_free_request(rq);

	return ret;
}

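/* Per-device DMA pool used for PPA lists and out-of-band metadata buffers */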
static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;

	return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
	struct dma_pool *dma_pool = pool;

	dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				    gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *addr,
							dma_addr_t dma_handler)
{
	dma_pool_free(pool, addr, dma_handler);
}

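/* Device operations exposed to the LightNVM core via nvm_register() */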
static struct nvm_dev_ops nvme_nvm_dev_ops = {
	.identity		= nvme_nvm_identity,

	.get_bb_tbl		= nvme_nvm_get_bb_tbl,
	.set_bb_tbl		= nvme_nvm_set_bb_tbl,

	.submit_io		= nvme_nvm_submit_io,
	.submit_io_sync		= nvme_nvm_submit_io_sync,

	.create_dma_pool	= nvme_nvm_create_dma_pool,
	.destroy_dma_pool	= nvme_nvm_destroy_dma_pool,
	.dev_dma_alloc		= nvme_nvm_dev_dma_alloc,
	.dev_dma_free		= nvme_nvm_dev_dma_free,

	.max_phys_sect		= 64,
};

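/*
 * Common path for the vio ioctls: map the user data, metadata and PPA list
 * buffers and execute the passthrough command synchronously.
 */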
static int nvme_nvm_submit_user_cmd(struct request_queue *q,
				struct nvme_ns *ns,
				struct nvme_nvm_command *vcmd,
				void __user *ubuf, unsigned int bufflen,
				void __user *meta_buf, unsigned int meta_len,
				void __user *ppa_buf, unsigned int ppa_len,
				u32 *result, u64 *status, unsigned int timeout)
{
	bool write = nvme_is_write((struct nvme_command *)vcmd);
	struct nvm_dev *dev = ns->ndev;
	struct gendisk *disk = ns->disk;
	struct request *rq;
	struct bio *bio = NULL;
	__le64 *ppa_list = NULL;
	dma_addr_t ppa_dma;
	__le64 *metadata = NULL;
	dma_addr_t metadata_dma;
	DECLARE_COMPLETION_ONSTACK(wait);
	int ret = 0;

	rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
			NVME_QID_ANY);
	if (IS_ERR(rq)) {
		ret = -ENOMEM;
		goto err_cmd;
	}

	rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (ppa_buf && ppa_len) {
		ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
		if (!ppa_list) {
			ret = -ENOMEM;
			goto err_rq;
		}
		if (copy_from_user(ppa_list, (void __user *)ppa_buf,
						sizeof(u64) * (ppa_len + 1))) {
			ret = -EFAULT;
			goto err_ppa;
		}
		vcmd->ph_rw.spba = cpu_to_le64(ppa_dma);
	} else {
		vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf);
	}

	if (ubuf && bufflen) {
		ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
		if (ret)
			goto err_ppa;
		bio = rq->bio;

		if (meta_buf && meta_len) {
			metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL,
								&metadata_dma);
			if (!metadata) {
				ret = -ENOMEM;
				goto err_map;
			}

			if (write) {
				if (copy_from_user(metadata,
						(void __user *)meta_buf,
						meta_len)) {
					ret = -EFAULT;
					goto err_meta;
				}
			}
			vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
		}

		bio->bi_disk = disk;
	}

	blk_execute_rq(q, NULL, rq, 0);

	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else if (nvme_req(rq)->status & 0x7ff)
		ret = -EIO;
	if (result)
		*result = nvme_req(rq)->status & 0x7ff;
	if (status)
		*status = le64_to_cpu(nvme_req(rq)->result.u64);

	if (metadata && !ret && !write) {
		if (copy_to_user(meta_buf, (void *)metadata, meta_len))
			ret = -EFAULT;
	}
err_meta:
	if (meta_buf && meta_len)
		dma_pool_free(dev->dma_pool, metadata, metadata_dma);
err_map:
	if (bio)
		blk_rq_unmap_user(bio);
err_ppa:
	if (ppa_buf && ppa_len)
		dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
err_rq:
	blk_mq_free_request(rq);
err_cmd:
	return ret;
}

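/* NVME_NVM_IOCTL_SUBMIT_VIO: build a ph_rw command from a user vio descriptor */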
static int nvme_nvm_submit_vio(struct nvme_ns *ns,
					struct nvm_user_vio __user *uvio)
{
	struct nvm_user_vio vio;
	struct nvme_nvm_command c;
	unsigned int length;
	int ret;

	if (copy_from_user(&vio, uvio, sizeof(vio)))
		return -EFAULT;
	if (vio.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.ph_rw.opcode = vio.opcode;
	c.ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
	c.ph_rw.control = cpu_to_le16(vio.control);
	c.ph_rw.length = cpu_to_le16(vio.nppas);

	length = (vio.nppas + 1) << ns->lba_shift;

	ret = nvme_nvm_submit_user_cmd(ns->queue, ns, &c,
			(void __user *)(uintptr_t)vio.addr, length,
			(void __user *)(uintptr_t)vio.metadata,
							vio.metadata_len,
			(void __user *)(uintptr_t)vio.ppa_list, vio.nppas,
			&vio.result, &vio.status, 0);

	if (ret && copy_to_user(uvio, &vio, sizeof(vio)))
		return -EFAULT;

	return ret;
}

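/*
 * Generic vendor passthrough. All opcodes except 0xF2 (get bad block table)
 * require CAP_SYS_ADMIN.
 */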
static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
					struct nvm_passthru_vio __user *uvcmd)
{
	struct nvm_passthru_vio vcmd;
	struct nvme_nvm_command c;
	struct request_queue *q;
	unsigned int timeout = 0;
	int ret;

	if (copy_from_user(&vcmd, uvcmd, sizeof(vcmd)))
		return -EFAULT;
	if ((vcmd.opcode != 0xF2) && (!capable(CAP_SYS_ADMIN)))
		return -EACCES;
	if (vcmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = vcmd.opcode;
	c.common.nsid = cpu_to_le32(ns->head->ns_id);
	c.common.cdw2[0] = cpu_to_le32(vcmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(vcmd.cdw3);
	/* cdw11-12 */
	c.ph_rw.length = cpu_to_le16(vcmd.nppas);
	c.ph_rw.control  = cpu_to_le16(vcmd.control);
	c.common.cdw10[3] = cpu_to_le32(vcmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(vcmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(vcmd.cdw15);

	if (vcmd.timeout_ms)
		timeout = msecs_to_jiffies(vcmd.timeout_ms);

	q = admin ? ns->ctrl->admin_q : ns->queue;

	ret = nvme_nvm_submit_user_cmd(q, ns,
			(struct nvme_nvm_command *)&c,
			(void __user *)(uintptr_t)vcmd.addr, vcmd.data_len,
			(void __user *)(uintptr_t)vcmd.metadata,
							vcmd.metadata_len,
			(void __user *)(uintptr_t)vcmd.ppa_list, vcmd.nppas,
			&vcmd.result, &vcmd.status, timeout);

	if (ret && copy_to_user(uvcmd, &vcmd, sizeof(vcmd)))
		return -EFAULT;

	return ret;
}

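/* ioctl entry point, called from the core NVMe driver for LightNVM namespaces */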
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NVME_NVM_IOCTL_ADMIN_VIO:
		return nvme_nvm_user_vcmd(ns, 1, (void __user *)arg);
	case NVME_NVM_IOCTL_IO_VIO:
		return nvme_nvm_user_vcmd(ns, 0, (void __user *)arg);
	case NVME_NVM_IOCTL_SUBMIT_VIO:
		return nvme_nvm_submit_vio(ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}

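/* Allocate a LightNVM device on top of the namespace queue and register it */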
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
{
	struct request_queue *q = ns->queue;
	struct nvm_dev *dev;

	_nvme_nvm_check_size();

	dev = nvm_alloc_dev(node);
	if (!dev)
		return -ENOMEM;

	dev->q = q;
	memcpy(dev->name, disk_name, DISK_NAME_LEN);
	dev->ops = &nvme_nvm_dev_ops;
	dev->private_data = ns;
	ns->ndev = dev;

	return nvm_register(dev);
}

void nvme_nvm_unregister(struct nvme_ns *ns)
{
	nvm_unregister(ns->ndev);
}

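/* Single show() implementation shared by all lightnvm sysfs attributes */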
static ssize_t nvm_dev_attr_show(struct device *dev,
				 struct device_attribute *dattr, char *page)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
	struct nvm_dev *ndev = ns->ndev;
	struct nvm_id *id;
	struct nvm_id_group *grp;
	struct attribute *attr;

	if (!ndev)
		return 0;

	id = &ndev->identity;
	grp = &id->grp;
	attr = &dattr->attr;

	if (strcmp(attr->name, "version") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id);
	} else if (strcmp(attr->name, "vendor_opcode") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt);
	} else if (strcmp(attr->name, "capabilities") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
	} else if (strcmp(attr->name, "device_mode") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
	/* kept for compatibility */
	} else if (strcmp(attr->name, "media_manager") == 0) {
		return scnprintf(page, PAGE_SIZE, "%s\n", "gennvm");
	} else if (strcmp(attr->name, "ppa_format") == 0) {
		return scnprintf(page, PAGE_SIZE,
			"0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			id->ppaf.ch_offset, id->ppaf.ch_len,
			id->ppaf.lun_offset, id->ppaf.lun_len,
			id->ppaf.pln_offset, id->ppaf.pln_len,
			id->ppaf.blk_offset, id->ppaf.blk_len,
			id->ppaf.pg_offset, id->ppaf.pg_len,
			id->ppaf.sect_offset, id->ppaf.sect_len);
	} else if (strcmp(attr->name, "media_type") == 0) {	/* u8 */
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->mtype);
	} else if (strcmp(attr->name, "flash_media_type") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->fmtype);
	} else if (strcmp(attr->name, "num_channels") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_ch);
	} else if (strcmp(attr->name, "num_luns") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_lun);
	} else if (strcmp(attr->name, "num_planes") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
	} else if (strcmp(attr->name, "num_blocks") == 0) {	/* u16 */
795
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_chk);
796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906
	} else if (strcmp(attr->name, "num_pages") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
	} else if (strcmp(attr->name, "page_size") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->fpg_sz);
	} else if (strcmp(attr->name, "hw_sector_size") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->csecs);
	} else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->sos);
	} else if (strcmp(attr->name, "read_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdt);
	} else if (strcmp(attr->name, "read_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdm);
	} else if (strcmp(attr->name, "prog_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprt);
	} else if (strcmp(attr->name, "prog_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprm);
	} else if (strcmp(attr->name, "erase_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbet);
	} else if (strcmp(attr->name, "erase_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbem);
	} else if (strcmp(attr->name, "multiplane_modes") == 0) {
		return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mpos);
	} else if (strcmp(attr->name, "media_capabilities") == 0) {
		return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mccap);
	} else if (strcmp(attr->name, "max_phys_secs") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n",
				ndev->ops->max_phys_sect);
	} else {
		return scnprintf(page,
				 PAGE_SIZE,
				 "Unhandled attr(%s) in `nvm_dev_attr_show`\n",
				 attr->name);
	}
}

#define NVM_DEV_ATTR_RO(_name)						\
	DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)

static NVM_DEV_ATTR_RO(version);
static NVM_DEV_ATTR_RO(vendor_opcode);
static NVM_DEV_ATTR_RO(capabilities);
static NVM_DEV_ATTR_RO(device_mode);
static NVM_DEV_ATTR_RO(ppa_format);
static NVM_DEV_ATTR_RO(media_manager);

static NVM_DEV_ATTR_RO(media_type);
static NVM_DEV_ATTR_RO(flash_media_type);
static NVM_DEV_ATTR_RO(num_channels);
static NVM_DEV_ATTR_RO(num_luns);
static NVM_DEV_ATTR_RO(num_planes);
static NVM_DEV_ATTR_RO(num_blocks);
static NVM_DEV_ATTR_RO(num_pages);
static NVM_DEV_ATTR_RO(page_size);
static NVM_DEV_ATTR_RO(hw_sector_size);
static NVM_DEV_ATTR_RO(oob_sector_size);
static NVM_DEV_ATTR_RO(read_typ);
static NVM_DEV_ATTR_RO(read_max);
static NVM_DEV_ATTR_RO(prog_typ);
static NVM_DEV_ATTR_RO(prog_max);
static NVM_DEV_ATTR_RO(erase_typ);
static NVM_DEV_ATTR_RO(erase_max);
static NVM_DEV_ATTR_RO(multiplane_modes);
static NVM_DEV_ATTR_RO(media_capabilities);
static NVM_DEV_ATTR_RO(max_phys_secs);

static struct attribute *nvm_dev_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_vendor_opcode.attr,
	&dev_attr_capabilities.attr,
	&dev_attr_device_mode.attr,
	&dev_attr_media_manager.attr,

	&dev_attr_ppa_format.attr,
	&dev_attr_media_type.attr,
	&dev_attr_flash_media_type.attr,
	&dev_attr_num_channels.attr,
	&dev_attr_num_luns.attr,
	&dev_attr_num_planes.attr,
	&dev_attr_num_blocks.attr,
	&dev_attr_num_pages.attr,
	&dev_attr_page_size.attr,
	&dev_attr_hw_sector_size.attr,
	&dev_attr_oob_sector_size.attr,
	&dev_attr_read_typ.attr,
	&dev_attr_read_max.attr,
	&dev_attr_prog_typ.attr,
	&dev_attr_prog_max.attr,
	&dev_attr_erase_typ.attr,
	&dev_attr_erase_max.attr,
	&dev_attr_multiplane_modes.attr,
	&dev_attr_media_capabilities.attr,
	&dev_attr_max_phys_secs.attr,
	NULL,
};

static const struct attribute_group nvm_dev_attr_group = {
	.name		= "lightnvm",
	.attrs		= nvm_dev_attrs,
};

int nvme_nvm_register_sysfs(struct nvme_ns *ns)
{
	return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
					&nvm_dev_attr_group);
}

void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
{
	sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
					&nvm_dev_attr_group);
}