/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_mgrs);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
{
	struct nvm_tgt_type *tmp, *tt = NULL;

	if (lock)
		down_write(&nvm_tgtt_lock);

	list_for_each_entry(tmp, &nvm_tgt_types, list)
		if (!strcmp(name, tmp->name)) {
			tt = tmp;
			break;
		}

	if (lock)
		up_write(&nvm_tgtt_lock);
	return tt;
}
EXPORT_SYMBOL(nvm_find_target_type);

int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (nvm_find_target_type(tt->name, 0))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	down_write(&nvm_tgtt_lock);
	list_del(&tt->list);
	up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
							dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvmm_type *nvm_find_mgr_type(const char *name)
{
	struct nvmm_type *mt;

	list_for_each_entry(mt, &nvm_mgrs, list)
		if (!strcmp(name, mt->name))
			return mt;

	return NULL;
}

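/*
 * A media manager claims a device by returning > 0 from register_mgr();
 * 0 means the manager does not own the device and the scan continues,
 * while < 0 aborts initialization.
 */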
static struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
{
	struct nvmm_type *mt;
	int ret;

	lockdep_assert_held(&nvm_lock);

	list_for_each_entry(mt, &nvm_mgrs, list) {
		if (strncmp(dev->sb.mmtype, mt->name, NVM_MMTYPE_LEN))
			continue;

		ret = mt->register_mgr(dev);
		if (ret < 0) {
			pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
								ret, dev->name);
			return NULL; /* initialization failed */
		} else if (ret > 0)
			return mt;
	}

	return NULL;
}

int nvm_register_mgr(struct nvmm_type *mt)
{
	struct nvm_dev *dev;
	int ret = 0;

	down_write(&nvm_lock);
	if (nvm_find_mgr_type(mt->name)) {
		ret = -EEXIST;
		goto finish;
	} else {
		list_add(&mt->list, &nvm_mgrs);
	}

	/* try to register media mgr if any device has none configured */
	list_for_each_entry(dev, &nvm_devices, devices) {
		if (dev->mt)
			continue;

		dev->mt = nvm_init_mgr(dev);
	}
finish:
	up_write(&nvm_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_mgr);

void nvm_unregister_mgr(struct nvmm_type *mt)
{
	if (!mt)
		return;

	down_write(&nvm_lock);
	list_del(&mt->list);
	up_write(&nvm_lock);
}
EXPORT_SYMBOL(nvm_unregister_mgr);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}

int nvm_set_bb_tbl(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
								int type)
{
	struct nvm_rq rqd;
	int ret;

	if (nr_ppas > dev->ops->max_phys_sect) {
		pr_err("nvm: unable to update all sysblocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
	if (ret)
		return ret;

	nvm_generic_to_addr_mode(dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(dev, &rqd);
	if (ret) {
		pr_err("nvm: sysblk failed bb mark\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_bb_tbl);

int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev)
{
	struct nvm_dev *dev = tgt_dev->parent;

	return dev->ops->max_phys_sect;
}
EXPORT_SYMBOL(nvm_max_phys_sects);

int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;

	return dev->mt->submit_io(tgt_dev, rqd);
}
EXPORT_SYMBOL(nvm_submit_io);

int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p, int flags)
{
	struct nvm_dev *dev = tgt_dev->parent;

	return dev->mt->erase_blk(tgt_dev, p, flags);
}
EXPORT_SYMBOL(nvm_erase_blk);

int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
		    nvm_l2p_update_fn *update_l2p, void *priv)
{
	struct nvm_dev *dev = tgt_dev->parent;

	if (!dev->ops->get_l2p_tbl)
		return 0;

	return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
}
EXPORT_SYMBOL(nvm_get_l2p_tbl);

int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
{
	struct nvm_dev *dev = tgt_dev->parent;

	return dev->mt->get_area(dev, lba, len);
}
EXPORT_SYMBOL(nvm_get_area);

void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t lba)
{
	struct nvm_dev *dev = tgt_dev->parent;

	dev->mt->put_area(dev, lba);
}
EXPORT_SYMBOL(nvm_put_area);

void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_ppas > 1) {
		for (i = 0; i < rqd->nr_ppas; i++)
			rqd->ppa_list[i] = dev_to_generic_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
	}
}
EXPORT_SYMBOL(nvm_addr_to_generic_mode);

void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_ppas > 1) {
		for (i = 0; i < rqd->nr_ppas; i++)
			rqd->ppa_list[i] = generic_to_dev_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
	}
}
EXPORT_SYMBOL(nvm_generic_to_addr_mode);

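/*
 * Note on the unfolding below: when vblk is set on a device with multiple
 * planes, nvm_set_rqd_ppalist() expands each input ppa into one entry per
 * plane, grouped by plane index. With plane_cnt = 2 and nr_ppas = 2 the
 * resulting list is { ppa0/pl0, ppa1/pl0, ppa0/pl1, ppa1/pl1 }.
 */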
int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
			const struct ppa_addr *ppas, int nr_ppas, int vblk)
{
	struct nvm_geo *geo = &dev->geo;
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if ((!vblk || geo->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("nvm: failed to allocate dma memory\n");
		return -ENOMEM;
	}

	if (!vblk) {
		for (i = 0; i < nr_ppas; i++)
			rqd->ppa_list[i] = ppas[i];
	} else {
		plane_cnt = geo->plane_mode;
		rqd->nr_ppas *= plane_cnt;

		for (i = 0; i < nr_ppas; i++) {
			for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
				ppa = ppas[i];
				ppa.g.pl = pl_idx;
				rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_rqd_ppalist);

void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
}
EXPORT_SYMBOL(nvm_free_rqd_ppalist);

int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
								int flags)
{
	struct nvm_rq rqd;
	int ret;

	if (!dev->ops->erase_block)
		return 0;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
	if (ret)
		return ret;

	nvm_generic_to_addr_mode(dev, &rqd);

	rqd.flags = flags;

	ret = dev->ops->erase_block(dev, &rqd);

	nvm_free_rqd_ppalist(dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_erase_ppa);

void nvm_end_io(struct nvm_rq *rqd, int error)
{
	rqd->error = error;
	rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

static void nvm_end_io_sync(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->wait;

	rqd->wait = NULL;

	complete(waiting);
}

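/*
 * Synchronous submission helper: maps the caller's buffer into a bio,
 * submits the request with nvm_end_io_sync() as completion callback, then
 * waits for completion and returns rqd->error.
 */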
static int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
						int flags, void *buf, int len)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct bio *bio;
	int ret;
	unsigned long hang_check;

	bio = bio_map_kern(dev->q, buf, len, GFP_KERNEL);
	if (IS_ERR_OR_NULL(bio))
		return -ENOMEM;

	nvm_generic_to_addr_mode(dev, rqd);

	rqd->dev = NULL;
	rqd->opcode = opcode;
	rqd->flags = flags;
	rqd->bio = bio;
	rqd->wait = &wait;
	rqd->end_io = nvm_end_io_sync;

	ret = dev->ops->submit_io(dev, rqd);
	if (ret) {
		bio_put(bio);
		return ret;
	}

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		while (!wait_for_completion_io_timeout(&wait,
							hang_check * (HZ/2)))
			;
	else
		wait_for_completion_io(&wait);

	return rqd->error;
}

/**
 * nvm_submit_ppa_list - submit user-defined ppa list to device. The caller
 *			 must free the ppa list if necessary.
 * @dev:	device
 * @ppa_list:	user created ppa_list
 * @nr_ppas:	length of ppa_list
 * @opcode:	device opcode
 * @flags:	device flags
 * @buf:	data buffer
 * @len:	data buffer length
 */
int nvm_submit_ppa_list(struct nvm_dev *dev, struct ppa_addr *ppa_list,
			int nr_ppas, int opcode, int flags, void *buf, int len)
{
	struct nvm_rq rqd;

	if (dev->ops->max_phys_sect < nr_ppas)
		return -EINVAL;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.nr_ppas = nr_ppas;
	if (nr_ppas > 1)
		rqd.ppa_list = ppa_list;
	else
		rqd.ppa_addr = ppa_list[0];

	return __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
}
EXPORT_SYMBOL(nvm_submit_ppa_list);

/**
 * nvm_submit_ppa - submit PPAs to device. PPAs are automatically unfolded
 *		    into single-, dual-, or quad-plane PPAs depending on the
 *		    device type.
 * @dev:	device
 * @ppa:	user created ppa_list
 * @nr_ppas:	length of ppa_list
 * @opcode:	device opcode
 * @flags:	device flags
 * @buf:	data buffer
 * @len:	data buffer length
 */
int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
				int opcode, int flags, void *buf, int len)
{
	struct nvm_rq rqd;
	int ret;

	memset(&rqd, 0, sizeof(struct nvm_rq));
	ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas, 1);
	if (ret)
		return ret;

	ret = __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);

	nvm_free_rqd_ppalist(dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_ppa);

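/*
 * Usage sketch (hypothetical caller): read one full flash page at ppa 0
 * synchronously into a kernel buffer. The flags value 0 is a placeholder;
 * a real caller would pass access flags matching the device's plane mode.
 *
 *	struct nvm_geo *geo = &dev->geo;
 *	void *buf = kmalloc(geo->fpg_size, GFP_KERNEL);
 *	struct ppa_addr ppa = { .ppa = 0 };
 *	int err;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	err = nvm_submit_ppa(dev, &ppa, 1, NVM_OP_PREAD, 0, buf,
 *			     geo->fpg_size);
 */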
/*
 * Folds a bad block list from its plane representation to its virtual
 * block representation. The fold is done in place, and the reduced size is
 * returned.
 *
 * If any plane is marked bad or grown bad, the virtual block is marked bad.
 * Otherwise, the state of the first plane acts as the block state.
 */
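/*
 * Example (hypothetical input): with plane_mode = 2 and
 * blks = { NVM_BLK_T_FREE, NVM_BLK_T_BAD, NVM_BLK_T_FREE, NVM_BLK_T_FREE },
 * block 0 folds to NVM_BLK_T_BAD because its second plane is bad, block 1
 * folds to NVM_BLK_T_FREE, and the reduced size blks_per_lun = 2 is
 * returned.
 */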
int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
{
	struct nvm_geo *geo = &dev->geo;
	int blk, offset, pl, blktype;

	if (nr_blks != geo->blks_per_lun * geo->plane_mode)
		return -EINVAL;

	for (blk = 0; blk < geo->blks_per_lun; blk++) {
		offset = blk * geo->plane_mode;
		blktype = blks[offset];

		/* Bad blocks on any planes take precedence over other types */
		for (pl = 0; pl < geo->plane_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		blks[blk] = blktype;
	}

	return geo->blks_per_lun;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);

int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks)
{
	ppa = generic_to_dev_addr(dev, ppa);

	return dev->ops->get_bb_tbl(dev, ppa, blks);
}
EXPORT_SYMBOL(nvm_get_bb_tbl);

static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	struct nvm_geo *geo = &dev->geo;
	int i;

	dev->lps_per_blk = geo->pgs_per_blk;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* Just a linear array */
	for (i = 0; i < dev->lps_per_blk; i++)
		dev->lptbl[i] = i;

	return 0;
}

static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	int i, p;
	struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;

	if (!mlc->num_pairs)
		return 0;

	dev->lps_per_blk = mlc->num_pairs;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* The lower page table encoding consists of a list of bytes, where
	 * each byte has a lower and an upper half. The first half-byte holds
	 * the initial value, and every subsequent half-byte is an offset
	 * added to the previous value.
	 */
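	/*
	 * Example (hypothetical input): pairs = { 0x21, 0x13 } decodes as the
	 * nibble sequence 1, 2, 3, 1, giving lptbl = { 1, 3, 6, 7 }.
	 */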
	dev->lptbl[0] = mlc->pairs[0] & 0xF;
	for (i = 1; i < dev->lps_per_blk; i++) {
		p = mlc->pairs[i >> 1];
		if (i & 0x1) /* upper */
			dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
		else /* lower */
			dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
	}

	return 0;
}

static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_id *id = &dev->identity;
	struct nvm_id_group *grp = &id->groups[0];
	struct nvm_geo *geo = &dev->geo;
	int ret;

	/* Whole device values */
	geo->nr_chnls = grp->num_ch;
	geo->luns_per_chnl = grp->num_lun;

	/* Generic device values */
	geo->pgs_per_blk = grp->num_pg;
	geo->blks_per_lun = grp->num_blk;
	geo->nr_planes = grp->num_pln;
	geo->fpg_size = grp->fpg_sz;
	geo->pfpg_size = grp->fpg_sz * grp->num_pln;
	geo->sec_size = grp->csecs;
	geo->oob_size = grp->sos;
	geo->sec_per_pg = grp->fpg_sz / grp->csecs;
	geo->mccap = grp->mccap;
	memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

	geo->plane_mode = NVM_PLANE_SINGLE;
	geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;

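	/*
	 * mpos advertises the supported multi-plane modes; in the 1.2 spec
	 * layout it carries one byte each for read, program and erase, so
	 * 0x020202 tests the dual-plane bit in all three and 0x040404 the
	 * quad-plane bit.
	 */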
	if (grp->mpos & 0x020202)
		geo->plane_mode = NVM_PLANE_DOUBLE;
	if (grp->mpos & 0x040404)
		geo->plane_mode = NVM_PLANE_QUAD;

	if (grp->mtype != 0) {
		pr_err("nvm: memory type not supported\n");
		return -EINVAL;
	}

	/* calculated values */
	geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
	geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
	geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
	geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;

	dev->total_secs = geo->nr_luns * geo->sec_per_lun;
	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	switch (grp->fmtype) {
	case NVM_ID_FMTYPE_SLC:
		if (nvm_init_slc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	case NVM_ID_FMTYPE_MLC:
		if (nvm_init_mlc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	default:
		pr_err("nvm: flash type not supported\n");
		ret = -EINVAL;
		goto err_fmtype;
	}

	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	blk_queue_logical_block_size(dev->q, geo->sec_size);

	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}

static void nvm_free_mgr(struct nvm_dev *dev)
{
	if (!dev->mt)
		return;

	dev->mt->unregister_mgr(dev);
	dev->mt = NULL;
}

void nvm_free(struct nvm_dev *dev)
{
	if (!dev)
		return;

	nvm_free_mgr(dev);

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	kfree(dev->lptbl);
	kfree(dev->lun_map);
	kfree(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret = -EINVAL;

	if (!dev->q || !dev->ops)
		return ret;

	if (dev->ops->identity(dev, &dev->identity)) {
		pr_err("nvm: device could not be identified\n");
		goto err;
	}

	pr_debug("nvm: ver:%x nvm_vendor:%x groups:%u\n",
			dev->identity.ver_id, dev->identity.vmnt,
							dev->identity.cgrps);

	if (dev->identity.ver_id != 1) {
		pr_err("nvm: device not supported by kernel.\n");
		goto err;
	}

	if (dev->identity.cgrps != 1) {
		pr_err("nvm: only one group configuration supported.\n");
		goto err;
	}

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("nvm: could not initialize core structures.\n");
		goto err;
	}

	pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
			dev->name, geo->sec_per_pg, geo->nr_planes,
			geo->pgs_per_blk, geo->blks_per_lun,
			geo->nr_luns, geo->nr_chnls);
	return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
	return ret;
}

struct nvm_dev *nvm_alloc_dev(int node)
{
	return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
}
EXPORT_SYMBOL(nvm_alloc_dev);

int nvm_register(struct nvm_dev *dev)
{
	int ret;

	ret = nvm_init(dev);
	if (ret)
		goto err_init;

	if (dev->ops->max_phys_sect > 256) {
		pr_info("nvm: max sectors supported is 256.\n");
		ret = -EINVAL;
		goto err_init;
	}

	if (dev->ops->max_phys_sect > 1) {
		dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
		if (!dev->dma_pool) {
			pr_err("nvm: could not create dma pool\n");
			ret = -ENOMEM;
			goto err_init;
		}
	}

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
		ret = nvm_get_sysblock(dev, &dev->sb);
		if (!ret)
			pr_err("nvm: device not initialized.\n");
		else if (ret < 0)
			pr_err("nvm: err (%d) on device initialization\n", ret);
	}

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	if (ret > 0)
		dev->mt = nvm_init_mgr(dev);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
err_init:
	kfree(dev->lun_map);
	return ret;
}
EXPORT_SYMBOL(nvm_register);

void nvm_unregister(struct nvm_dev *dev)
{
	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	nvm_free(dev);
}
EXPORT_SYMBOL(nvm_unregister);

static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	struct nvm_ioctl_create_simple *s;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (!dev->mt) {
		pr_info("nvm: device has no media manager registered.\n");
		return -ENODEV;
	}

	if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
		pr_err("nvm: config type not valid\n");
		return -EINVAL;
	}
	s = &create->conf.s;

	if (s->lun_begin > s->lun_end || s->lun_end > dev->geo.nr_luns) {
		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
			s->lun_begin, s->lun_end, dev->geo.nr_luns);
		return -EINVAL;
	}

	return dev->mt->create_tgt(dev, create);
}

static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		sprintf(info->devname, "%s", dev->name);
		if (dev->mt) {
			info->bmversion[0] = dev->mt->version[0];
			info->bmversion[1] = dev->mt->version[1];
			info->bmversion[2] = dev->mt->version[2];
			sprintf(info->bmname, "%s", dev->mt->name);
		} else {
			sprintf(info->bmname, "none");
		}

		i++;
		if (i > 31) {
			pr_err("nvm: max 31 devices can be reported.\n");
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;
	struct nvm_dev *dev;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	list_for_each_entry(dev, &nvm_devices, devices) {
		if (!dev->mt)
			continue;

		ret = dev->mt->remove_tgt(dev, &remove);
		if (!ret)
			break;
	}

	return ret;
}

static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info)
{
	info->seqnr = 1;
	info->erase_cnt = 0;
	info->version = 1;
}

static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
{
	struct nvm_dev *dev;
	struct nvm_sb_info info;
	int ret;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(init->dev);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	nvm_setup_nvm_sb_info(&info);

	strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
	info.fs_ppa.ppa = -1;

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
		ret = nvm_init_sysblock(dev, &info);
		if (ret)
			return ret;
	}

	memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));

	down_write(&nvm_lock);
	dev->mt = nvm_init_mgr(dev);
	up_write(&nvm_lock);

	return 0;
}

static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	init.dev[DISK_NAME_LEN - 1] = '\0';

	return __nvm_ioctl_dev_init(&init);
}

static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;
	struct nvm_dev *dev;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(fact.dev);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	nvm_free_mgr(dev);

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
		return nvm_dev_factory(dev, fact.flags);

	return 0;
}

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}

static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
module_misc_device(_nvm_misc);

MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);

MODULE_AUTHOR("Matias Bjorling <m@bjorling.me>");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.1");