/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.c)
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Implementation of a physical block-device target for Open-channel SSDs.
 *
 * pblk-init.c - pblk's initialization.
 */

#include "pblk.h"

static struct kmem_cache *pblk_ws_cache, *pblk_rec_cache, *pblk_g_rq_cache,
				*pblk_w_rq_cache;
static DECLARE_RWSEM(pblk_lock);
struct bio_set *pblk_bio_set;

static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
			  struct bio *bio)
{
	int ret;

	/* Read requests must be <= 256kb due to NVMe's 64 bit completion bitmap
	 * constraint. Writes can be of arbitrary size.
	 */
	if (bio_data_dir(bio) == READ) {
		blk_queue_split(q, &bio);
		ret = pblk_submit_read(pblk, bio);
		if (ret == NVM_IO_DONE && bio_flagged(bio, BIO_CLONED))
			bio_put(bio);

		return ret;
	}

	/* Prevent deadlock in the case of a modest LUN configuration and large
	 * user I/Os. Unless stalled, the rate limiter leaves at least 256KB
	 * available for user I/O.
	 */
	if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
		blk_queue_split(q, &bio);

	return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
}

static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
{
	struct pblk *pblk = q->queuedata;

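	/* Discards complete here unless they carry REQ_PREFLUSH; in that
	 * case the bio falls through to the write path so the flush is
	 * honored by the write buffer.
	 */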
	if (bio_op(bio) == REQ_OP_DISCARD) {
		pblk_discard(pblk, bio);
		if (!(bio->bi_opf & REQ_PREFLUSH)) {
			bio_endio(bio);
			return BLK_QC_T_NONE;
		}
	}

	switch (pblk_rw_io(q, pblk, bio)) {
	case NVM_IO_ERR:
		bio_io_error(bio);
		break;
	case NVM_IO_DONE:
		bio_endio(bio);
		break;
	}

	return BLK_QC_T_NONE;
}

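/* The L2P table stores one physical address per 4KB sector. When the
 * device's physical address format fits in fewer than 32 bits, 4 byte
 * entries are used instead of 8, halving the table's memory footprint.
 */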
static size_t pblk_trans_map_size(struct pblk *pblk)
{
	int entry_size = 8;

	if (pblk->ppaf_bitsize < 32)
		entry_size = 4;

	return entry_size * pblk->rl.nr_secs;
}

#ifdef CONFIG_NVM_DEBUG
static u32 pblk_l2p_crc(struct pblk *pblk)
{
	size_t map_size;
	u32 crc = ~(u32)0;

	map_size = pblk_trans_map_size(pblk);
	crc = crc32_le(crc, pblk->trans_map, map_size);
	return crc;
}
#endif

static void pblk_l2p_free(struct pblk *pblk)
{
	vfree(pblk->trans_map);
}

static int pblk_l2p_init(struct pblk *pblk)
{
	sector_t i;
	struct ppa_addr ppa;
	size_t map_size;

	map_size = pblk_trans_map_size(pblk);
	pblk->trans_map = vmalloc(map_size);
	if (!pblk->trans_map)
		return -ENOMEM;

	pblk_ppa_set_empty(&ppa);

	for (i = 0; i < pblk->rl.nr_secs; i++)
		pblk_trans_map_set(pblk, i, ppa);

	return 0;
}

static void pblk_rwb_free(struct pblk *pblk)
{
	if (pblk_rb_tear_down_check(&pblk->rwb))
		pr_err("pblk: write buffer error on tear down\n");

	pblk_rb_data_free(&pblk->rwb);
	vfree(pblk_rb_entries_ref(&pblk->rwb));
}

static int pblk_rwb_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_rb_entry *entries;
	unsigned long nr_entries;
	unsigned int power_size, power_seg_sz;

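	/* The buffer is sized as a power of two so that ring index
	 * arithmetic reduces to bit masking; both sizes are passed to
	 * pblk_rb_init() as powers of two (orders).
	 */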
	nr_entries = pblk_rb_calculate_size(pblk->pgs_in_buffer);

	entries = vzalloc(nr_entries * sizeof(struct pblk_rb_entry));
	if (!entries)
		return -ENOMEM;

	power_size = get_count_order(nr_entries);
	power_seg_sz = get_count_order(geo->sec_size);

	return pblk_rb_init(&pblk->rwb, entries, power_size, power_seg_sz);
}

/* Minimum pages needed within a lun */
#define ADDR_POOL_SIZE 64

static int pblk_set_ppaf(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_addr_format ppaf = geo->ppaf;
	int power_len;

	/* Re-calculate channel and lun format to adapt to configuration */
	power_len = get_count_order(geo->nr_chnls);
	if (1 << power_len != geo->nr_chnls) {
		pr_err("pblk: supports only power-of-two channel config.\n");
		return -EINVAL;
	}
	ppaf.ch_len = power_len;

	power_len = get_count_order(geo->nr_luns);
	if (1 << power_len != geo->nr_luns) {
		pr_err("pblk: supports only power-of-two LUN config.\n");
		return -EINVAL;
	}
	ppaf.lun_len = power_len;

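	/* Resulting device-side PPA layout, from least to most significant
	 * bits: sector | plane | channel | LUN | page | block.
	 */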
	pblk->ppaf.sec_offset = 0;
	pblk->ppaf.pln_offset = ppaf.sect_len;
	pblk->ppaf.ch_offset = pblk->ppaf.pln_offset + ppaf.pln_len;
	pblk->ppaf.lun_offset = pblk->ppaf.ch_offset + ppaf.ch_len;
	pblk->ppaf.pg_offset = pblk->ppaf.lun_offset + ppaf.lun_len;
	pblk->ppaf.blk_offset = pblk->ppaf.pg_offset + ppaf.pg_len;
	pblk->ppaf.sec_mask = (1ULL << ppaf.sect_len) - 1;
	pblk->ppaf.pln_mask = ((1ULL << ppaf.pln_len) - 1) <<
							pblk->ppaf.pln_offset;
	pblk->ppaf.ch_mask = ((1ULL << ppaf.ch_len) - 1) <<
							pblk->ppaf.ch_offset;
	pblk->ppaf.lun_mask = ((1ULL << ppaf.lun_len) - 1) <<
							pblk->ppaf.lun_offset;
	pblk->ppaf.pg_mask = ((1ULL << ppaf.pg_len) - 1) <<
							pblk->ppaf.pg_offset;
	pblk->ppaf.blk_mask = ((1ULL << ppaf.blk_len) - 1) <<
							pblk->ppaf.blk_offset;

	pblk->ppaf_bitsize = pblk->ppaf.blk_offset + ppaf.blk_len;

	return 0;
}

static int pblk_init_global_caches(struct pblk *pblk)
{
	down_write(&pblk_lock);
	pblk_ws_cache = kmem_cache_create("pblk_blk_ws",
				sizeof(struct pblk_line_ws), 0, 0, NULL);
	if (!pblk_ws_cache) {
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_rec_cache = kmem_cache_create("pblk_rec",
				sizeof(struct pblk_rec_ctx), 0, 0, NULL);
	if (!pblk_rec_cache) {
		kmem_cache_destroy(pblk_ws_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_g_rq_cache = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
				0, 0, NULL);
	if (!pblk_g_rq_cache) {
		kmem_cache_destroy(pblk_ws_cache);
		kmem_cache_destroy(pblk_rec_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_w_rq_cache = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
				0, 0, NULL);
	if (!pblk_w_rq_cache) {
		kmem_cache_destroy(pblk_ws_cache);
		kmem_cache_destroy(pblk_rec_cache);
		kmem_cache_destroy(pblk_g_rq_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}
	up_write(&pblk_lock);

	return 0;
}

static void pblk_free_global_caches(struct pblk *pblk)
{
	kmem_cache_destroy(pblk_ws_cache);
	kmem_cache_destroy(pblk_rec_cache);
	kmem_cache_destroy(pblk_g_rq_cache);
	kmem_cache_destroy(pblk_w_rq_cache);
}

static int pblk_core_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;

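	/* Buffer enough sectors to write NVM_MEM_PAGE_WRITE flash pages on
	 * every plane of every LUN, i.e. a full write stripe across the
	 * device.
	 */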
	pblk->pgs_in_buffer = NVM_MEM_PAGE_WRITE * geo->sec_per_pg *
						geo->nr_planes * geo->all_luns;

	if (pblk_init_global_caches(pblk))
		return -ENOMEM;

	/* Internal bios can be at most the sectors signaled by the device. */
	pblk->page_bio_pool = mempool_create_page_pool(nvm_max_phys_sects(dev),
									0);
	if (!pblk->page_bio_pool)
		goto free_global_caches;

	pblk->gen_ws_pool = mempool_create_slab_pool(PBLK_GEN_WS_POOL_SIZE,
							pblk_ws_cache);
	if (!pblk->gen_ws_pool)
		goto free_page_bio_pool;

	pblk->rec_pool = mempool_create_slab_pool(geo->all_luns,
							pblk_rec_cache);
	if (!pblk->rec_pool)
		goto free_gen_ws_pool;

	pblk->r_rq_pool = mempool_create_slab_pool(geo->all_luns,
							pblk_g_rq_cache);
	if (!pblk->r_rq_pool)
		goto free_rec_pool;

	pblk->e_rq_pool = mempool_create_slab_pool(geo->all_luns,
							pblk_g_rq_cache);
	if (!pblk->e_rq_pool)
		goto free_r_rq_pool;

	pblk->w_rq_pool = mempool_create_slab_pool(geo->all_luns,
							pblk_w_rq_cache);
	if (!pblk->w_rq_pool)
		goto free_e_rq_pool;

	pblk->close_wq = alloc_workqueue("pblk-close-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_NR_CLOSE_JOBS);
	if (!pblk->close_wq)
		goto free_w_rq_pool;

	pblk->bb_wq = alloc_workqueue("pblk-bb-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!pblk->bb_wq)
		goto free_close_wq;

	pblk->r_end_wq = alloc_workqueue("pblk-read-end-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!pblk->r_end_wq)
		goto free_bb_wq;

	if (pblk_set_ppaf(pblk))
		goto free_r_end_wq;

	if (pblk_rwb_init(pblk))
		goto free_r_end_wq;

	INIT_LIST_HEAD(&pblk->compl_list);
	return 0;

free_r_end_wq:
	destroy_workqueue(pblk->r_end_wq);
free_bb_wq:
	destroy_workqueue(pblk->bb_wq);
free_close_wq:
	destroy_workqueue(pblk->close_wq);
free_w_rq_pool:
	mempool_destroy(pblk->w_rq_pool);
free_e_rq_pool:
	mempool_destroy(pblk->e_rq_pool);
free_r_rq_pool:
	mempool_destroy(pblk->r_rq_pool);
free_rec_pool:
	mempool_destroy(pblk->rec_pool);
free_gen_ws_pool:
	mempool_destroy(pblk->gen_ws_pool);
free_page_bio_pool:
	mempool_destroy(pblk->page_bio_pool);
free_global_caches:
	pblk_free_global_caches(pblk);
	return -ENOMEM;
}

static void pblk_core_free(struct pblk *pblk)
{
	if (pblk->close_wq)
		destroy_workqueue(pblk->close_wq);

	if (pblk->r_end_wq)
		destroy_workqueue(pblk->r_end_wq);

	if (pblk->bb_wq)
		destroy_workqueue(pblk->bb_wq);

	mempool_destroy(pblk->page_bio_pool);
	mempool_destroy(pblk->gen_ws_pool);
	mempool_destroy(pblk->rec_pool);
	mempool_destroy(pblk->r_rq_pool);
	mempool_destroy(pblk->e_rq_pool);
	mempool_destroy(pblk->w_rq_pool);

	pblk_rwb_free(pblk);

	pblk_free_global_caches(pblk);
}

static void pblk_luns_free(struct pblk *pblk)
{
	kfree(pblk->luns);
}

static void pblk_free_line_bitmaps(struct pblk_line *line)
{
	kfree(line->blk_bitmap);
	kfree(line->erase_bitmap);
}

static void pblk_lines_free(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;
	int i;

	spin_lock(&l_mg->free_lock);
	for (i = 0; i < l_mg->nr_lines; i++) {
		line = &pblk->lines[i];

		pblk_line_free(pblk, line);
		pblk_free_line_bitmaps(line);
	}
	spin_unlock(&l_mg->free_lock);
}

static void pblk_line_meta_free(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int i;

	kfree(l_mg->bb_template);
	kfree(l_mg->bb_aux);
	kfree(l_mg->vsc_list);

	for (i = 0; i < PBLK_DATA_LINES; i++) {
		kfree(l_mg->sline_meta[i]);
		pblk_mfree(l_mg->eline_meta[i]->buf, l_mg->emeta_alloc_type);
		kfree(l_mg->eline_meta[i]);
	}

	kfree(pblk->lines);
}

static int pblk_bb_discovery(struct nvm_tgt_dev *dev, struct pblk_lun *rlun)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr ppa;
	u8 *blks;
	int nr_blks, ret;

	nr_blks = geo->nr_chks * geo->plane_mode;
	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	ppa.ppa = 0;
	ppa.g.ch = rlun->bppa.g.ch;
	ppa.g.lun = rlun->bppa.g.lun;

	ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
	if (ret)
		goto out;

	nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
	if (nr_blks < 0) {
		ret = nr_blks;
		goto out;
	}

	rlun->bb_list = blks;

	return 0;
out:
	kfree(blks);
	return ret;
}

static int pblk_bb_line(struct pblk *pblk, struct pblk_line *line,
			int blk_per_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int bb_cnt = 0;
	int i;

	for (i = 0; i < blk_per_line; i++) {
		rlun = &pblk->luns[i];
		if (rlun->bb_list[line->id] == NVM_BLK_T_FREE)
			continue;

		set_bit(pblk_ppa_to_pos(geo, rlun->bppa), line->blk_bitmap);
		bb_cnt++;
	}

	return bb_cnt;
}

static int pblk_alloc_line_bitmaps(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;

	line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->blk_bitmap)
		return -ENOMEM;

	line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->erase_bitmap) {
		kfree(line->blk_bitmap);
		return -ENOMEM;
	}

	return 0;
}

static int pblk_luns_init(struct pblk *pblk, struct ppa_addr *luns)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int i, ret;

	/* TODO: Implement unbalanced LUN support */
	if (geo->nr_luns < 0) {
		pr_err("pblk: unbalanced LUN config.\n");
		return -EINVAL;
	}

	pblk->luns = kcalloc(geo->all_luns, sizeof(struct pblk_lun),
								GFP_KERNEL);
	if (!pblk->luns)
		return -ENOMEM;

	for (i = 0; i < geo->all_luns; i++) {
		/* Stripe across channels */
		int ch = i % geo->nr_chnls;
		int lun_raw = i / geo->nr_chnls;
		int lunid = lun_raw + ch * geo->nr_luns;
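		/* dev->luns is laid out channel-major; e.g. with 2 channels
		 * of 4 LUNs each, i = 0, 1, 2, ... selects lunid = 0, 4, 1,
		 * 5, ... so consecutive pblk LUNs alternate channels.
		 */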

		rlun = &pblk->luns[i];
		rlun->bppa = luns[lunid];

		sema_init(&rlun->wr_sem, 1);

		ret = pblk_bb_discovery(dev, rlun);
		if (ret) {
			while (--i >= 0)
				kfree(pblk->luns[i].bb_list);
			return ret;
		}
	}

	return 0;
}

static int pblk_lines_configure(struct pblk *pblk, int flags)
{
	struct pblk_line *line = NULL;
	int ret = 0;

	if (!(flags & NVM_TARGET_FACTORY)) {
		line = pblk_recov_l2p(pblk);
		if (IS_ERR(line)) {
			pr_err("pblk: could not recover l2p table\n");
			ret = -EFAULT;
		}
	}

#ifdef CONFIG_NVM_DEBUG
	pr_info("pblk init: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif

	/* Free full lines directly as GC has not been started yet */
	pblk_gc_free_full_lines(pblk);

	if (!line) {
		/* Configure next line for user data */
		line = pblk_line_get_first_data(pblk);
		if (!line) {
			pr_err("pblk: line list corrupted\n");
			ret = -EFAULT;
		}
	}

	return ret;
}

/* See comment over struct line_emeta definition */
static unsigned int calc_emeta_len(struct pblk *pblk)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;

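	/* emeta is laid out in three variable-sized regions, each rounded
	 * up to sector granularity: [1] the header plus block bitmap,
	 * [2] the lba list (one u64 per data sector) and [3] the vsc list
	 * (one u32 per line). emeta_sec[0]/emeta_len[0] track the total.
	 */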
	/* Round to sector size so that lba_list starts on its own sector */
	lm->emeta_sec[1] = DIV_ROUND_UP(
			sizeof(struct line_emeta) + lm->blk_bitmap_len,
			geo->sec_size);
	lm->emeta_len[1] = lm->emeta_sec[1] * geo->sec_size;

	/* Round to sector size so that vsc_list starts on its own sector */
	lm->dsec_per_line = lm->sec_per_line - lm->emeta_sec[0];
	lm->emeta_sec[2] = DIV_ROUND_UP(lm->dsec_per_line * sizeof(u64),
			geo->sec_size);
	lm->emeta_len[2] = lm->emeta_sec[2] * geo->sec_size;

	lm->emeta_sec[3] = DIV_ROUND_UP(l_mg->nr_lines * sizeof(u32),
			geo->sec_size);
	lm->emeta_len[3] = lm->emeta_sec[3] * geo->sec_size;

	lm->vsc_list_len = l_mg->nr_lines * sizeof(u32);

	return (lm->emeta_len[1] + lm->emeta_len[2] + lm->emeta_len[3]);
}

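/* Worked example: with nr_free_blks = 1000 and an over-provisioning ratio
 * pblk->op = 11 (11%), provisioned = 1000 * 89 / 100 = 890 blocks and
 * op_blks = 110; the exposed capacity is then 890 blocks minus the blocks
 * consumed by line metadata, expressed in sectors.
 */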
static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct nvm_geo *geo = &dev->geo;
	sector_t provisioned;
	int sec_meta, blk_meta;

	if (geo->op == NVM_TARGET_DEFAULT_OP)
		pblk->op = PBLK_DEFAULT_OP;
	else
		pblk->op = geo->op;

	provisioned = nr_free_blks;
	provisioned *= (100 - pblk->op);
	sector_div(provisioned, 100);

	pblk->op_blks = nr_free_blks - provisioned;

	/* Internally pblk manages all free blocks, but all calculations based
	 * on user capacity consider only provisioned blocks
	 */
	pblk->rl.total_blocks = nr_free_blks;
	pblk->rl.nr_secs = nr_free_blks * geo->sec_per_chk;

	/* Consider sectors used for metadata */
	sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
	blk_meta = DIV_ROUND_UP(sec_meta, geo->sec_per_chk);

	pblk->capacity = (provisioned - blk_meta) * geo->sec_per_chk;

	atomic_set(&pblk->rl.free_blocks, nr_free_blks);
	atomic_set(&pblk->rl.free_user_blocks, nr_free_blks);
}

static int pblk_lines_alloc_metadata(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	int i;

	/* smeta is always small enough to fit on a kmalloc memory allocation,
	 * emeta depends on the number of LUNs allocated to the pblk instance
	 */
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		l_mg->sline_meta[i] = kmalloc(lm->smeta_len, GFP_KERNEL);
		if (!l_mg->sline_meta[i])
			goto fail_free_smeta;
	}

	/* emeta allocates three different buffers for managing metadata with
	 * in-memory and in-media layouts
	 */
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		struct pblk_emeta *emeta;

		emeta = kmalloc(sizeof(struct pblk_emeta), GFP_KERNEL);
		if (!emeta)
			goto fail_free_emeta;

		if (lm->emeta_len[0] > KMALLOC_MAX_CACHE_SIZE) {
			l_mg->emeta_alloc_type = PBLK_VMALLOC_META;

			emeta->buf = vmalloc(lm->emeta_len[0]);
			if (!emeta->buf) {
				kfree(emeta);
				goto fail_free_emeta;
			}

			emeta->nr_entries = lm->emeta_sec[0];
			l_mg->eline_meta[i] = emeta;
		} else {
			l_mg->emeta_alloc_type = PBLK_KMALLOC_META;

			emeta->buf = kmalloc(lm->emeta_len[0], GFP_KERNEL);
			if (!emeta->buf) {
				kfree(emeta);
				goto fail_free_emeta;
			}

			emeta->nr_entries = lm->emeta_sec[0];
			l_mg->eline_meta[i] = emeta;
		}
	}

	l_mg->vsc_list = kcalloc(l_mg->nr_lines, sizeof(__le32), GFP_KERNEL);
	if (!l_mg->vsc_list)
		goto fail_free_emeta;

	for (i = 0; i < l_mg->nr_lines; i++)
		l_mg->vsc_list[i] = cpu_to_le32(EMPTY_ENTRY);

	return 0;

fail_free_emeta:
	while (--i >= 0) {
		if (l_mg->emeta_alloc_type == PBLK_VMALLOC_META)
			vfree(l_mg->eline_meta[i]->buf);
		else
			kfree(l_mg->eline_meta[i]->buf);
		kfree(l_mg->eline_meta[i]);
	}

fail_free_smeta:
	for (i = 0; i < PBLK_DATA_LINES; i++)
		kfree(l_mg->sline_meta[i]);

	return -ENOMEM;
}

static int pblk_lines_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line;
	unsigned int smeta_len, emeta_len;
	long nr_bad_blks, nr_free_blks;
	int bb_distance, max_write_ppas, mod;
	int i, ret;

	pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE);
	max_write_ppas = pblk->min_write_pgs * geo->all_luns;
	pblk->max_write_pgs = (max_write_ppas < nvm_max_phys_sects(dev)) ?
				max_write_ppas : nvm_max_phys_sects(dev);
	pblk_set_sec_per_write(pblk, pblk->min_write_pgs);

	if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) {
		pr_err("pblk: cannot support device max_phys_sect\n");
		return -EINVAL;
	}

	div_u64_rem(geo->sec_per_chk, pblk->min_write_pgs, &mod);
	if (mod) {
		pr_err("pblk: bad configuration of sectors/pages\n");
		return -EINVAL;
	}

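	/* A line stripes one chunk (block) from every LUN in the instance.
	 * The mid/high thresholds (1/2 and 1/4 of a line's sectors) are
	 * later used to classify lines into GC lists by valid sector count.
	 */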
	l_mg->nr_lines = geo->nr_chks;
	l_mg->log_line = l_mg->data_line = NULL;
	l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
	l_mg->nr_free_lines = 0;
	bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);

	lm->sec_per_line = geo->sec_per_chk * geo->all_luns;
	lm->blk_per_line = geo->all_luns;
	lm->blk_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
	lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
	lm->lun_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
	lm->mid_thrs = lm->sec_per_line / 2;
	lm->high_thrs = lm->sec_per_line / 4;
	lm->meta_distance = (geo->all_luns / 2) * pblk->min_write_pgs;

	/* Calculate necessary pages for smeta. See comment over struct
	 * line_smeta definition
	 */
	i = 1;
add_smeta_page:
	lm->smeta_sec = i * geo->sec_per_pl;
	lm->smeta_len = lm->smeta_sec * geo->sec_size;

	smeta_len = sizeof(struct line_smeta) + lm->lun_bitmap_len;
	if (smeta_len > lm->smeta_len) {
		i++;
		goto add_smeta_page;
	}

	/* Calculate necessary pages for emeta. See comment over struct
	 * line_emeta definition
	 */
	i = 1;
add_emeta_page:
	lm->emeta_sec[0] = i * geo->sec_per_pl;
	lm->emeta_len[0] = lm->emeta_sec[0] * geo->sec_size;

	emeta_len = calc_emeta_len(pblk);
	if (emeta_len > lm->emeta_len[0]) {
		i++;
		goto add_emeta_page;
	}

	lm->emeta_bb = geo->all_luns > i ? geo->all_luns - i : 0;

	lm->min_blk_line = 1;
	if (geo->all_luns > 1)
		lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec +
					lm->emeta_sec[0], geo->sec_per_chk);

	if (lm->min_blk_line > lm->blk_per_line) {
		pr_err("pblk: config. not supported. Min. LUN in line:%d\n",
							lm->blk_per_line);
		ret = -EINVAL;
		goto fail;
	}

	ret = pblk_lines_alloc_metadata(pblk);
	if (ret)
		goto fail;

	l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_template) {
		ret = -ENOMEM;
		goto fail_free_meta;
	}

	l_mg->bb_aux = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_aux) {
		ret = -ENOMEM;
		goto fail_free_bb_template;
	}

	bb_distance = (geo->all_luns) * geo->sec_per_pl;
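	/* bb_template marks the sectors that map to the first LUN position
	 * of each plane-page stripe; at line init it is shifted by a bad
	 * block's position to mask that block's sectors out of the line.
	 */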
	for (i = 0; i < lm->sec_per_line; i += bb_distance)
		bitmap_set(l_mg->bb_template, i, geo->sec_per_pl);

	INIT_LIST_HEAD(&l_mg->free_list);
	INIT_LIST_HEAD(&l_mg->corrupt_list);
	INIT_LIST_HEAD(&l_mg->bad_list);
	INIT_LIST_HEAD(&l_mg->gc_full_list);
	INIT_LIST_HEAD(&l_mg->gc_high_list);
	INIT_LIST_HEAD(&l_mg->gc_mid_list);
	INIT_LIST_HEAD(&l_mg->gc_low_list);
	INIT_LIST_HEAD(&l_mg->gc_empty_list);

	INIT_LIST_HEAD(&l_mg->emeta_list);

	l_mg->gc_lists[0] = &l_mg->gc_high_list;
	l_mg->gc_lists[1] = &l_mg->gc_mid_list;
	l_mg->gc_lists[2] = &l_mg->gc_low_list;

	spin_lock_init(&l_mg->free_lock);
	spin_lock_init(&l_mg->close_lock);
	spin_lock_init(&l_mg->gc_lock);

	pblk->lines = kcalloc(l_mg->nr_lines, sizeof(struct pblk_line),
								GFP_KERNEL);
	if (!pblk->lines) {
		ret = -ENOMEM;
		goto fail_free_bb_aux;
	}

	nr_free_blks = 0;
	for (i = 0; i < l_mg->nr_lines; i++) {
		int blk_in_line;

		line = &pblk->lines[i];

		line->pblk = pblk;
		line->id = i;
		line->type = PBLK_LINETYPE_FREE;
		line->state = PBLK_LINESTATE_FREE;
		line->gc_group = PBLK_LINEGC_NONE;
		line->vsc = &l_mg->vsc_list[i];
		spin_lock_init(&line->lock);

		ret = pblk_alloc_line_bitmaps(pblk, line);
		if (ret)
			goto fail_free_lines;

		nr_bad_blks = pblk_bb_line(pblk, line, lm->blk_per_line);
		if (nr_bad_blks < 0 || nr_bad_blks > lm->blk_per_line) {
			pblk_free_line_bitmaps(line);
			ret = -EINVAL;
			goto fail_free_lines;
		}

		blk_in_line = lm->blk_per_line - nr_bad_blks;
		if (blk_in_line < lm->min_blk_line) {
			line->state = PBLK_LINESTATE_BAD;
			list_add_tail(&line->list, &l_mg->bad_list);
			continue;
		}

		nr_free_blks += blk_in_line;
		atomic_set(&line->blk_in_line, blk_in_line);

		l_mg->nr_free_lines++;
		list_add_tail(&line->list, &l_mg->free_list);
	}

	pblk_set_provision(pblk, nr_free_blks);

	/* Cleanup per-LUN bad block lists - managed within lines on run-time */
	for (i = 0; i < geo->all_luns; i++)
		kfree(pblk->luns[i].bb_list);

	return 0;
fail_free_lines:
	while (--i >= 0)
		pblk_free_line_bitmaps(&pblk->lines[i]);
fail_free_bb_aux:
	kfree(l_mg->bb_aux);
fail_free_bb_template:
	kfree(l_mg->bb_template);
fail_free_meta:
	pblk_line_meta_free(pblk);
fail:
	for (i = 0; i < geo->all_luns; i++)
		kfree(pblk->luns[i].bb_list);

	return ret;
}

static int pblk_writer_init(struct pblk *pblk)
{
	pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
	if (IS_ERR(pblk->writer_ts)) {
		int err = PTR_ERR(pblk->writer_ts);

		if (err != -EINTR)
			pr_err("pblk: could not allocate writer kthread (%d)\n",
					err);
		return err;
	}

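	/* Arm a 100ms timer that periodically kicks the writer thread so
	 * buffered data keeps draining even when no new I/O arrives.
	 */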
	timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));

	return 0;
}

static void pblk_writer_stop(struct pblk *pblk)
{
	/* The pipeline must be stopped and the write buffer emptied before the
	 * write thread is stopped
	 */
	WARN(pblk_rb_read_count(&pblk->rwb),
			"Stopping not fully persisted write buffer\n");

	WARN(pblk_rb_sync_count(&pblk->rwb),
			"Stopping not fully synced write buffer\n");

	if (pblk->writer_ts)
		kthread_stop(pblk->writer_ts);
	del_timer(&pblk->wtimer);
}

static void pblk_free(struct pblk *pblk)
{
	pblk_luns_free(pblk);
	pblk_lines_free(pblk);
	pblk_line_meta_free(pblk);
	pblk_core_free(pblk);
	pblk_l2p_free(pblk);

	kfree(pblk);
}

static void pblk_tear_down(struct pblk *pblk)
{
	pblk_pipeline_stop(pblk);
	pblk_writer_stop(pblk);
	pblk_rb_sync_l2p(&pblk->rwb);
	pblk_rl_free(&pblk->rl);

	pr_debug("pblk: consistent tear down\n");
}

static void pblk_exit(void *private)
{
	struct pblk *pblk = private;

	down_write(&pblk_lock);
	pblk_gc_exit(pblk);
	pblk_tear_down(pblk);

#ifdef CONFIG_NVM_DEBUG
	pr_info("pblk exit: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif

	pblk_free(pblk);
	up_write(&pblk_lock);
}

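/* pblk accounts capacity in 4KB sectors; NR_PHY_IN_LOG scales this to the
 * 512 byte sectors the block layer expects.
 */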
static sector_t pblk_capacity(void *private)
{
	struct pblk *pblk = private;

	return pblk->capacity * NR_PHY_IN_LOG;
}

static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
		       int flags)
{
	struct nvm_geo *geo = &dev->geo;
	struct request_queue *bqueue = dev->q;
	struct request_queue *tqueue = tdisk->queue;
	struct pblk *pblk;
	int ret;

	if (dev->identity.dom & NVM_RSP_L2P) {
		pr_err("pblk: host-side L2P table not supported. (%x)\n",
							dev->identity.dom);
		return ERR_PTR(-EINVAL);
	}

	pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
	if (!pblk)
		return ERR_PTR(-ENOMEM);

	pblk->dev = dev;
	pblk->disk = tdisk;
	pblk->state = PBLK_STATE_RUNNING;
	pblk->gc.gc_enabled = 0;

	spin_lock_init(&pblk->trans_lock);
	spin_lock_init(&pblk->lock);

	if (flags & NVM_TARGET_FACTORY)
		pblk_setup_uuid(pblk);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_set(&pblk->inflight_writes, 0);
	atomic_long_set(&pblk->padded_writes, 0);
	atomic_long_set(&pblk->padded_wb, 0);
	atomic_long_set(&pblk->nr_flush, 0);
	atomic_long_set(&pblk->req_writes, 0);
	atomic_long_set(&pblk->sub_writes, 0);
	atomic_long_set(&pblk->sync_writes, 0);
	atomic_long_set(&pblk->inflight_reads, 0);
	atomic_long_set(&pblk->cache_reads, 0);
	atomic_long_set(&pblk->sync_reads, 0);
	atomic_long_set(&pblk->recov_writes, 0);
	atomic_long_set(&pblk->recov_gc_writes, 0);
	atomic_long_set(&pblk->recov_gc_reads, 0);
#endif

	atomic_long_set(&pblk->read_failed, 0);
	atomic_long_set(&pblk->read_empty, 0);
	atomic_long_set(&pblk->read_high_ecc, 0);
	atomic_long_set(&pblk->read_failed_gc, 0);
	atomic_long_set(&pblk->write_failed, 0);
	atomic_long_set(&pblk->erase_failed, 0);

	ret = pblk_luns_init(pblk, dev->luns);
	if (ret) {
		pr_err("pblk: could not initialize luns\n");
		goto fail;
	}

	ret = pblk_lines_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize lines\n");
		goto fail_free_luns;
	}

	ret = pblk_core_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize core\n");
		goto fail_free_line_meta;
	}

	ret = pblk_l2p_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize maps\n");
		goto fail_free_core;
	}

	ret = pblk_lines_configure(pblk, flags);
	if (ret) {
		pr_err("pblk: could not configure lines\n");
		goto fail_free_l2p;
	}

	ret = pblk_writer_init(pblk);
	if (ret) {
		if (ret != -EINTR)
			pr_err("pblk: could not initialize write thread\n");
		goto fail_free_lines;
	}

	ret = pblk_gc_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize gc\n");
		goto fail_stop_writer;
	}

	/* inherit the size from the underlying device */
	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

	blk_queue_write_cache(tqueue, true, false);

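	/* Advertise discard at chunk granularity. pblk_discard() only
	 * invalidates L2P entries; the space is reclaimed later, per line,
	 * by the garbage collector.
	 */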
	tqueue->limits.discard_granularity = geo->sec_per_chk * geo->sec_size;
	tqueue->limits.discard_alignment = 0;
	blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, tqueue);

	pr_info("pblk(%s): luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
			tdisk->disk_name,
			geo->all_luns, pblk->l_mg.nr_lines,
			(unsigned long long)pblk->rl.nr_secs,
			pblk->rwb.nr_entries);

	wake_up_process(pblk->writer_ts);

	/* Check if we need to start GC */
	pblk_gc_should_kick(pblk);

	return pblk;

fail_stop_writer:
	pblk_writer_stop(pblk);
fail_free_lines:
	pblk_lines_free(pblk);
fail_free_l2p:
	pblk_l2p_free(pblk);
fail_free_core:
	pblk_core_free(pblk);
fail_free_line_meta:
	pblk_line_meta_free(pblk);
fail_free_luns:
	pblk_luns_free(pblk);
fail:
	kfree(pblk);
	return ERR_PTR(ret);
}

/* physical block device target */
static struct nvm_tgt_type tt_pblk = {
	.name		= "pblk",
	.version	= {1, 0, 0},

	.make_rq	= pblk_make_rq,
	.capacity	= pblk_capacity,

	.init		= pblk_init,
	.exit		= pblk_exit,

	.sysfs_init	= pblk_sysfs_init,
	.sysfs_exit	= pblk_sysfs_exit,
	.owner		= THIS_MODULE,
};

static int __init pblk_module_init(void)
{
	int ret;

	pblk_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
	if (!pblk_bio_set)
		return -ENOMEM;
	ret = nvm_register_tgt_type(&tt_pblk);
	if (ret)
		bioset_free(pblk_bio_set);
	return ret;
}

static void pblk_module_exit(void)
{
	bioset_free(pblk_bio_set);
	nvm_unregister_tgt_type(&tt_pblk);
}

module_init(pblk_module_init);
module_exit(pblk_module_exit);
MODULE_AUTHOR("Javier Gonzalez <javier@cnexlabs.com>");
MODULE_AUTHOR("Matias Bjorling <matias@cnexlabs.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Physical Block-Device for Open-Channel SSDs");