/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

/* Map between virtual and physical channel and lun */
struct nvm_ch_map {
	int ch_off;
	int nr_luns;
	int *lun_offs;
};

struct nvm_dev_map {
	struct nvm_ch_map *chnls;
	int nr_chnls;
};

struct nvm_area {
	struct list_head list;
	sector_t begin;
	sector_t end;	/* end is excluded */
};

static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
	struct nvm_target *tgt;

	list_for_each_entry(tgt, &dev->targets, list)
		if (!strcmp(name, tgt->disk->disk_name))
			return tgt;

	return NULL;
}

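/*
 * Reserve the luns in [lun_begin, lun_end] in the device's lun bitmap.
 * On conflict, the bits taken so far are released again and -EBUSY is
 * returned.
 */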
static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++) {
		if (test_and_set_bit(i, dev->lun_map)) {
			pr_err("nvm: lun %d already allocated\n", i);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= lun_begin)
		clear_bit(i, dev->lun_map);

	return -EBUSY;
}

static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
				 int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++)
		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}

static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_map = tgt_dev->map;
	int i, j;

	for (i = 0; i < dev_map->nr_chnls; i++) {
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs = ch_map->lun_offs;
		int ch = i + ch_map->ch_off;

		if (clear) {
			for (j = 0; j < ch_map->nr_luns; j++) {
				int lun = j + lun_offs[j];
				int lunid = (ch * dev->geo.luns_per_chnl) + lun;

				WARN_ON(!test_and_clear_bit(lunid,
							dev->lun_map));
			}
		}

		kfree(ch_map->lun_offs);
	}

	kfree(dev_map->chnls);
	kfree(dev_map);

	kfree(tgt_dev->luns);
	kfree(tgt_dev);
}

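/*
 * Build a target device view that covers only the luns in
 * [lun_begin, lun_end] of the parent device. The per-channel lun offset
 * maps are what later allow target addresses to be translated to and
 * from device addresses.
 */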
static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
					      int lun_begin, int lun_end)
{
	struct nvm_tgt_dev *tgt_dev = NULL;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_dev_map *dev_map;
	struct ppa_addr *luns;
	int nr_luns = lun_end - lun_begin + 1;
	int luns_left = nr_luns;
	int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
	int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
	int bch = lun_begin / dev->geo.luns_per_chnl;
	int blun = lun_begin % dev->geo.luns_per_chnl;
	int lunid = 0;
	int lun_balanced = 1;
	int prev_nr_luns;
	int i, j;

	nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;

	dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!dev_map)
		goto err_dev;

	dev_map->chnls = kcalloc(nr_chnls, sizeof(struct nvm_ch_map),
								GFP_KERNEL);
	if (!dev_map->chnls)
		goto err_chnls;

	luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!luns)
		goto err_luns;

	prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
					dev->geo.luns_per_chnl : luns_left;
	for (i = 0; i < nr_chnls; i++) {
		struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
		int *lun_roffs = ch_rmap->lun_offs;
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs;
		int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
					dev->geo.luns_per_chnl : luns_left;

		if (lun_balanced && prev_nr_luns != luns_in_chnl)
			lun_balanced = 0;

		ch_map->ch_off = ch_rmap->ch_off = bch;
		ch_map->nr_luns = luns_in_chnl;

		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_offs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++) {
			luns[lunid].ppa = 0;
			luns[lunid].g.ch = i;
			luns[lunid++].g.lun = j;

			lun_offs[j] = blun;
			lun_roffs[j + blun] = blun;
		}

		ch_map->lun_offs = lun_offs;

		/* when starting a new channel, lun offset is reset */
		blun = 0;
		luns_left -= luns_in_chnl;
	}

	dev_map->nr_chnls = nr_chnls;

	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
	if (!tgt_dev)
		goto err_ch;

	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
	/* Target device only owns a portion of the physical device */
	tgt_dev->geo.nr_chnls = nr_chnls;
	tgt_dev->geo.nr_luns = nr_luns;
	tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
	tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
	tgt_dev->q = dev->q;
	tgt_dev->map = dev_map;
	tgt_dev->luns = luns;
	memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id));

	tgt_dev->parent = dev;

	return tgt_dev;
err_ch:
	while (--i >= 0)
		kfree(dev_map->chnls[i].lun_offs);
	kfree(luns);
err_luns:
	kfree(dev_map->chnls);
err_chnls:
	kfree(dev_map);
err_dev:
	return tgt_dev;
}

static const struct block_device_operations nvm_fops = {
	.owner		= THIS_MODULE,
};

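/*
 * Create a target on top of a device: reserve the requested lun range,
 * build the target device view, set up a request queue and gendisk, and
 * hand over to the target type's init callback.
 */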
static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
	struct nvm_ioctl_create_simple *s = &create->conf.s;
	struct request_queue *tqueue;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	struct nvm_tgt_dev *tgt_dev;
	void *targetdata;

	tt = nvm_find_target_type(create->tgttype, 1);
	if (!tt) {
		pr_err("nvm: target type %s not found\n", create->tgttype);
		return -EINVAL;
	}

	mutex_lock(&dev->mlock);
	t = nvm_find_target(dev, create->tgtname);
	if (t) {
		pr_err("nvm: target name already exists.\n");
		mutex_unlock(&dev->mlock);
		return -EINVAL;
	}
	mutex_unlock(&dev->mlock);

	if (nvm_reserve_luns(dev, s->lun_begin, s->lun_end))
		return -ENOMEM;

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t)
		goto err_reserve;

	tgt_dev = nvm_create_tgt_dev(dev, s->lun_begin, s->lun_end);
	if (!tgt_dev) {
		pr_err("nvm: could not create target device\n");
		goto err_t;
	}

	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
	if (!tqueue)
		goto err_dev;
	blk_queue_make_request(tqueue, tt->make_rq);

	tdisk = alloc_disk(0);
	if (!tdisk)
		goto err_queue;

	sprintf(tdisk->disk_name, "%s", create->tgtname);
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->major = 0;
	tdisk->first_minor = 0;
	tdisk->fops = &nvm_fops;
	tdisk->queue = tqueue;

	targetdata = tt->init(tgt_dev, tdisk);
	if (IS_ERR(targetdata))
		goto err_init;

	tdisk->private_data = targetdata;
	tqueue->queuedata = targetdata;

	blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	if (tt->sysfs_init && tt->sysfs_init(tdisk))
		goto err_sysfs;

	t->type = tt;
	t->disk = tdisk;
	t->dev = tgt_dev;

	mutex_lock(&dev->mlock);
	list_add_tail(&t->list, &dev->targets);
	mutex_unlock(&dev->mlock);

	return 0;
err_sysfs:
	if (tt->exit)
		tt->exit(targetdata);
err_init:
	put_disk(tdisk);
err_queue:
	blk_cleanup_queue(tqueue);
err_dev:
	nvm_remove_tgt_dev(tgt_dev, 0);
err_t:
	kfree(t);
err_reserve:
	nvm_release_luns_err(dev, s->lun_begin, s->lun_end);
	return -ENOMEM;
}

static void __nvm_remove_target(struct nvm_target *t)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->sysfs_exit)
		tt->sysfs_exit(tdisk);

	if (tt->exit)
		tt->exit(tdisk->private_data);

	nvm_remove_tgt_dev(t->dev, 1);
	put_disk(tdisk);

	list_del(&t->list);
	kfree(t);
}

/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @dev:	device
 * @remove:	ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: on not found
 * <0: on error
 */
static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
{
	struct nvm_target *t;

	mutex_lock(&dev->mlock);
	t = nvm_find_target(dev, remove->tgtname);
	if (!t) {
		mutex_unlock(&dev->mlock);
		return 1;
	}
	__nvm_remove_target(t);
	mutex_unlock(&dev->mlock);

	return 0;
}

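/*
 * Allocate the device's reverse map, used to translate device addresses
 * back into target-local addresses. Offsets start out as -1 and are
 * filled in when a target claims the corresponding channels and luns.
 */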
static int nvm_register_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap;
	int i, j;

	rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!rmap)
		goto err_rmap;

	rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct nvm_ch_map),
								GFP_KERNEL);
	if (!rmap->chnls)
		goto err_chnls;

	for (i = 0; i < dev->geo.nr_chnls; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		int luns_in_chnl = dev->geo.luns_per_chnl;

		ch_rmap = &rmap->chnls[i];

		ch_rmap->ch_off = -1;
		ch_rmap->nr_luns = luns_in_chnl;

		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_roffs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++)
			lun_roffs[j] = -1;

		ch_rmap->lun_offs = lun_roffs;
	}

	dev->rmap = rmap;

	return 0;
err_ch:
	while (--i >= 0)
		kfree(rmap->chnls[i].lun_offs);
	kfree(rmap->chnls);
err_chnls:
	kfree(rmap);
err_rmap:
	return -ENOMEM;
}

static void nvm_unregister_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap = dev->rmap;
	int i;

	for (i = 0; i < dev->geo.nr_chnls; i++)
		kfree(rmap->chnls[i].lun_offs);

	kfree(rmap->chnls);
	kfree(rmap);
}

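/* Translate a target-local ppa into the parent device's channel/lun space */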
static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev_map *dev_map = tgt_dev->map;
	struct nvm_ch_map *ch_map = &dev_map->chnls[p->g.ch];
	int lun_off = ch_map->lun_offs[p->g.lun];

	p->g.ch += ch_map->ch_off;
	p->g.lun += lun_off;
}

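/* Translate a device ppa back into the owning target's channel/lun space */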
static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch];
	int lun_roff = ch_rmap->lun_offs[p->g.lun];

	p->g.ch -= ch_rmap->ch_off;
	p->g.lun -= lun_roff;
}

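/*
 * Apply the translations above to whole ppa lists and to the addresses
 * embedded in an nvm_rq, in both directions.
 */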
static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
				struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		nvm_map_to_dev(tgt_dev, &ppa_list[i]);
		ppa_list[i] = generic_to_dev_addr(tgt_dev, ppa_list[i]);
	}
}

static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
				struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa_list[i] = dev_to_generic_addr(tgt_dev, ppa_list[i]);
		nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
	}
}

static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	if (rqd->nr_ppas == 1) {
		nvm_ppa_tgt_to_dev(tgt_dev, &rqd->ppa_addr, 1);
		return;
	}

	nvm_ppa_tgt_to_dev(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}

static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	if (rqd->nr_ppas == 1) {
		nvm_ppa_dev_to_tgt(tgt_dev, &rqd->ppa_addr, 1);
		return;
	}

	nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}

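/*
 * Rebase device-linear sector entries so that they are relative to the
 * target's first lun instead of the whole device.
 */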
void nvm_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
		     int len)
{
	struct nvm_geo *geo = &dev->geo;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	u64 i;

	for (i = 0; i < len; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		struct ppa_addr gaddr;
		u64 pba = le64_to_cpu(entries[i]);
		int off;
		u64 diff;

		if (!pba)
			continue;

		gaddr = linear_to_generic_addr(geo, pba);
		ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
		lun_roffs = ch_rmap->lun_offs;

		off = gaddr.g.ch * geo->luns_per_chnl + gaddr.g.lun;

		diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
				(lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;

		entries[i] -= cpu_to_le64(diff);
	}
}
EXPORT_SYMBOL(nvm_part_to_tgt);

struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
{
	struct nvm_tgt_type *tmp, *tt = NULL;

	if (lock)
		down_write(&nvm_tgtt_lock);

	list_for_each_entry(tmp, &nvm_tgt_types, list)
		if (!strcmp(name, tmp->name)) {
			tt = tmp;
			break;
		}

	if (lock)
		up_write(&nvm_tgtt_lock);
	return tt;
}
EXPORT_SYMBOL(nvm_find_target_type);

int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (nvm_find_target_type(tt->name, 0))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	down_write(&nvm_tgtt_lock);
	list_del(&tt->list);
	up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
							dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}

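/*
 * Mark a set of blocks in the device's bad block table. The ppa list is
 * expanded across planes and translated to device addresses before the
 * update is issued.
 */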
int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		       int nr_ppas, int type)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_rq rqd;
	int ret;

	if (nr_ppas > dev->ops->max_phys_sect) {
		pr_err("nvm: unable to update all blocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas, 1);
	nvm_rq_tgt_to_dev(tgt_dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(tgt_dev, &rqd);
	if (ret) {
		pr_err("nvm: failed bb mark\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);

int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev)
{
	struct nvm_dev *dev = tgt_dev->parent;

	return dev->ops->max_phys_sect;
}
EXPORT_SYMBOL(nvm_max_phys_sects);

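/*
 * Translate the request's addresses to device space and hand it to the
 * underlying driver for asynchronous submission.
 */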
int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	return dev->ops->submit_io(dev, rqd);
}
EXPORT_SYMBOL(nvm_submit_io);

static void nvm_end_io_sync(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->private;

	complete(waiting);
}

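/*
 * Erase the given blocks synchronously: build an erase request, submit it
 * and wait on an on-stack completion.
 */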
int nvm_erase_sync(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
								int nr_ppas)
{
	struct nvm_geo *geo = &tgt_dev->geo;
	struct nvm_rq rqd;
	int ret;
	DECLARE_COMPLETION_ONSTACK(wait);

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.opcode = NVM_OP_ERASE;
	rqd.end_io = nvm_end_io_sync;
	rqd.private = &wait;
	rqd.flags = geo->plane_mode >> 1;

	ret = nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas, 1);
	if (ret)
		return ret;

	ret = nvm_submit_io(tgt_dev, &rqd);
	if (ret) {
		pr_err("rrpr: erase I/O submission failed: %d\n", ret);
		goto free_ppa_list;
	}
	wait_for_completion_io(&wait);

free_ppa_list:
	nvm_free_rqd_ppalist(tgt_dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_erase_sync);

int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
		    nvm_l2p_update_fn *update_l2p, void *priv)
{
	struct nvm_dev *dev = tgt_dev->parent;

	if (!dev->ops->get_l2p_tbl)
		return 0;

	return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
}
EXPORT_SYMBOL(nvm_get_l2p_tbl);

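/*
 * Reserve a contiguous range of 512-byte sectors on the device, keeping
 * the per-device area list sorted by start sector. The start of the
 * reserved area is returned through *lba.
 */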
int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_area *area, *prev, *next;
	sector_t begin = 0;
	sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9;

	if (len > max_sectors)
		return -EINVAL;

	area = kmalloc(sizeof(struct nvm_area), GFP_KERNEL);
	if (!area)
		return -ENOMEM;

	prev = NULL;

	spin_lock(&dev->lock);
	list_for_each_entry(next, &dev->area_list, list) {
		if (begin + len > next->begin) {
			begin = next->end;
			prev = next;
			continue;
		}
		break;
	}

	if ((begin + len) > max_sectors) {
		spin_unlock(&dev->lock);
		kfree(area);
		return -EINVAL;
	}

	area->begin = *lba = begin;
	area->end = begin + len;

	if (prev) /* insert into sorted order */
		list_add(&area->list, &prev->list);
	else
		list_add(&area->list, &dev->area_list);
	spin_unlock(&dev->lock);

	return 0;
}
EXPORT_SYMBOL(nvm_get_area);

void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_area *area;

	spin_lock(&dev->lock);
	list_for_each_entry(area, &dev->area_list, list) {
		if (area->begin != begin)
			continue;

		list_del(&area->list);
		spin_unlock(&dev->lock);
		kfree(area);
		return;
	}
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL(nvm_put_area);

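/*
 * Fill the request's ppa list from @ppas. A single ppa that needs no plane
 * expansion is stored inline; otherwise a DMA-able list is allocated and,
 * when @vblk is set, each address is replicated across all planes.
 */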
int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
			const struct ppa_addr *ppas, int nr_ppas, int vblk)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &tgt_dev->geo;
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if ((!vblk || geo->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("nvm: failed to allocate dma memory\n");
		return -ENOMEM;
	}

	if (!vblk) {
		for (i = 0; i < nr_ppas; i++)
			rqd->ppa_list[i] = ppas[i];
	} else {
		plane_cnt = geo->plane_mode;
		rqd->nr_ppas *= plane_cnt;

		for (i = 0; i < nr_ppas; i++) {
			for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
				ppa = ppas[i];
				ppa.g.pl = pl_idx;
				rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_rqd_ppalist);

void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}
EXPORT_SYMBOL(nvm_free_rqd_ppalist);

void nvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *tgt_dev = rqd->dev;

	/* Convert address space */
	if (tgt_dev)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);

	if (rqd->end_io)
		rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

/*
 * Folds a bad block list from its plane representation to its virtual
 * block representation. The fold is done in place and the reduced size
 * is returned.
 *
 * If any of the plane states is bad or grown bad, the virtual block is
 * marked bad. If not, the state of the first plane acts as the block
 * state.
 */
int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
{
	struct nvm_geo *geo = &dev->geo;
	int blk, offset, pl, blktype;

	if (nr_blks != geo->blks_per_lun * geo->plane_mode)
		return -EINVAL;

	for (blk = 0; blk < geo->blks_per_lun; blk++) {
		offset = blk * geo->plane_mode;
		blktype = blks[offset];

		/* Bad blocks on any planes take precedence over other types */
		for (pl = 0; pl < geo->plane_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		blks[blk] = blktype;
	}

	return geo->blks_per_lun;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);

int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
		       u8 *blks)
{
	struct nvm_dev *dev = tgt_dev->parent;

	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

	return dev->ops->get_bb_tbl(dev, ppa, blks);
}
EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);

static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	struct nvm_geo *geo = &dev->geo;
	int i;

	dev->lps_per_blk = geo->pgs_per_blk;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* Just a linear array */
	for (i = 0; i < dev->lps_per_blk; i++)
		dev->lptbl[i] = i;

	return 0;
}

static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	int i, p;
	struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;

	if (!mlc->num_pairs)
		return 0;

	dev->lps_per_blk = mlc->num_pairs;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* The lower page table encoding consists of a list of bytes, where each
	 * has a lower and an upper half. The first half byte maintains the
	 * increment value and every value after is an offset added to the
	 * previous increment value.
	 */
	dev->lptbl[0] = mlc->pairs[0] & 0xF;
	for (i = 1; i < dev->lps_per_blk; i++) {
		p = mlc->pairs[i >> 1];
		if (i & 0x1) /* upper */
			dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
		else /* lower */
			dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
	}

	return 0;
}

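/*
 * Derive the device geometry from the identify data, allocate the lun
 * bitmap and lower page tables, and register the reverse address map.
 */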
static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_id *id = &dev->identity;
	struct nvm_id_group *grp = &id->grp;
	struct nvm_geo *geo = &dev->geo;
	int ret;

	/* Whole device values */
	geo->nr_chnls = grp->num_ch;
	geo->luns_per_chnl = grp->num_lun;

	/* Generic device values */
	geo->pgs_per_blk = grp->num_pg;
	geo->blks_per_lun = grp->num_blk;
	geo->nr_planes = grp->num_pln;
	geo->fpg_size = grp->fpg_sz;
	geo->pfpg_size = grp->fpg_sz * grp->num_pln;
	geo->sec_size = grp->csecs;
	geo->oob_size = grp->sos;
	geo->sec_per_pg = grp->fpg_sz / grp->csecs;
	geo->mccap = grp->mccap;
	memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

	geo->plane_mode = NVM_PLANE_SINGLE;
	geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;

	if (grp->mpos & 0x020202)
		geo->plane_mode = NVM_PLANE_DOUBLE;
	if (grp->mpos & 0x040404)
		geo->plane_mode = NVM_PLANE_QUAD;

	if (grp->mtype != 0) {
		pr_err("nvm: memory type not supported\n");
		return -EINVAL;
	}

	/* calculated values */
	geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
	geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
	geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
	geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;

	dev->total_secs = geo->nr_luns * geo->sec_per_lun;
	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	switch (grp->fmtype) {
	case NVM_ID_FMTYPE_SLC:
		if (nvm_init_slc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	case NVM_ID_FMTYPE_MLC:
		if (nvm_init_mlc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	default:
		pr_err("nvm: flash type not supported\n");
		ret = -EINVAL;
		goto err_fmtype;
	}

	INIT_LIST_HEAD(&dev->area_list);
	INIT_LIST_HEAD(&dev->targets);
	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	ret = nvm_register_map(dev);
	if (ret)
		goto err_fmtype;

	blk_queue_logical_block_size(dev->q, geo->sec_size);
	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}

void nvm_free(struct nvm_dev *dev)
{
	if (!dev)
		return;

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	nvm_unregister_map(dev);
	kfree(dev->lptbl);
	kfree(dev->lun_map);
	kfree(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret = -EINVAL;

	if (dev->ops->identity(dev, &dev->identity)) {
		pr_err("nvm: device could not be identified\n");
		goto err;
	}

	pr_debug("nvm: ver:%x nvm_vendor:%x\n",
			dev->identity.ver_id, dev->identity.vmnt);

	if (dev->identity.ver_id != 1) {
		pr_err("nvm: device not supported by kernel.");
		goto err;
	}

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("nvm: could not initialize core structures.\n");
		goto err;
	}

	pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
			dev->name, geo->sec_per_pg, geo->nr_planes,
			geo->pgs_per_blk, geo->blks_per_lun,
			geo->nr_luns, geo->nr_chnls);
	return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
	return ret;
}

struct nvm_dev *nvm_alloc_dev(int node)
{
	return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
}
EXPORT_SYMBOL(nvm_alloc_dev);

int nvm_register(struct nvm_dev *dev)
{
	int ret;

	if (!dev->q || !dev->ops)
		return -EINVAL;

	if (dev->ops->max_phys_sect > 256) {
		pr_info("nvm: max sectors supported is 256.\n");
		return -EINVAL;
	}

	if (dev->ops->max_phys_sect > 1) {
		dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
		if (!dev->dma_pool) {
			pr_err("nvm: could not create dma pool\n");
			return -ENOMEM;
		}
	}

	ret = nvm_init(dev);
	if (ret)
		goto err_init;

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
err_init:
	dev->ops->destroy_dma_pool(dev->dma_pool);
	return ret;
}
EXPORT_SYMBOL(nvm_register);

void nvm_unregister(struct nvm_dev *dev)
{
	struct nvm_target *t, *tmp;

	mutex_lock(&dev->mlock);
	list_for_each_entry_safe(t, tmp, &dev->targets, list) {
		if (t->dev->parent != dev)
			continue;
		__nvm_remove_target(t);
	}
	mutex_unlock(&dev->mlock);

	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	nvm_free(dev);
}
EXPORT_SYMBOL(nvm_unregister);

static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	struct nvm_ioctl_create_simple *s;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
		pr_err("nvm: config type not valid\n");
		return -EINVAL;
	}
	s = &create->conf.s;

	if (s->lun_begin == -1 && s->lun_end == -1) {
		s->lun_begin = 0;
		s->lun_end = dev->geo.nr_luns - 1;
	}

	if (s->lun_begin > s->lun_end || s->lun_end >= dev->geo.nr_luns) {
		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
			s->lun_begin, s->lun_end, dev->geo.nr_luns - 1);
		return -EINVAL;
	}

	return nvm_create_tgt(dev, create);
}

static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		sprintf(info->devname, "%s", dev->name);

		/* kept for compatibility */
		info->bmversion[0] = 1;
		info->bmversion[1] = 0;
		info->bmversion[2] = 0;
		sprintf(info->bmname, "%s", "gennvm");
		i++;

		if (i > 31) {
			pr_err("nvm: max 31 devices can be reported.\n");
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;
	struct nvm_dev *dev;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	list_for_each_entry(dev, &nvm_devices, devices) {
		ret = nvm_remove_tgt(dev, &remove);
		if (!ret)
			break;
	}

	return ret;
}

/* kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return 0;
}

/* Kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	return 0;
}

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}

static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
builtin_misc_device(_nvm_misc);