/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_backend_configfs.h>

#include "target_core_rd.h"

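/*
 * The generic se_device is embedded inside struct rd_dev, so
 * container_of() recovers the backend-private state from the se_device
 * pointer handed in by the core.
 */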
static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
	return container_of(dev, struct rd_dev, dev);
}

/*	rd_attach_hba(): (part of the se_subsystem_api template)
 *
 *	Allocate the per-HBA rd_host state and hang it off the generic HBA.
 */
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct rd_host *rd_host;

	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
	if (!rd_host) {
		pr_err("Unable to allocate memory for struct rd_host\n");
		return -ENOMEM;
	}

	rd_host->rd_host_id = host_id;

	hba->hba_ptr = rd_host;

	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);

	return 0;
}

static void rd_detach_hba(struct se_hba *hba)
{
	struct rd_host *rd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

	kfree(rd_host);
	hba->hba_ptr = NULL;
}

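/*
 * Walk each scatterlist table, free every backing page that was
 * assigned, then free the scatterlist arrays themselves.  Returns the
 * number of pages released so callers can log the reclaimed size.
 */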
static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				 u32 sg_table_count)
{
	struct page *pg;
	struct scatterlist *sg;
	u32 i, j, page_count = 0, sg_per_table;

	for (i = 0; i < sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

		for (j = 0; j < sg_per_table; j++) {
			pg = sg_page(&sg[j]);
			if (pg) {
				__free_page(pg);
				page_count++;
			}
		}
		kfree(sg);
	}

	kfree(sg_table);
	return page_count;
}

static void rd_release_device_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
					  rd_dev->sg_table_count);

	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
}


/*	rd_allocate_sgl_table():
 *
 *	Build the scatterlist tables backing a ramdisk region and fill
 *	every page with init_payload.
 */
static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				 u32 total_sg_needed, unsigned char init_payload)
{
	u32 i = 0, j, page_offset = 0, sg_per_table;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	struct page *pg;
	struct scatterlist *sg;
	unsigned char *p;

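	/*
	 * Each iteration builds one table of at most max_sg_per_table
	 * entries, the cap imposed by the RD_MAX_ALLOCATION_SIZE array
	 * budget; when SG chaining is available, one extra entry links
	 * consecutive tables together.
	 */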
	while (total_sg_needed) {
		unsigned int chain_entry = 0;

		sg_per_table = (total_sg_needed > max_sg_per_table) ?
			max_sg_per_table : total_sg_needed;

#ifdef CONFIG_ARCH_HAS_SG_CHAIN

		/*
		 * Reserve extra element for chain entry
		 */
		if (sg_per_table < total_sg_needed)
			chain_entry = 1;

#endif /* CONFIG_ARCH_HAS_SG_CHAIN */

		sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
				GFP_KERNEL);
		if (!sg) {
			pr_err("Unable to allocate scatterlist array"
				" for struct rd_dev\n");
			return -ENOMEM;
		}

		sg_init_table(sg, sg_per_table + chain_entry);

#ifdef CONFIG_ARCH_HAS_SG_CHAIN

		if (i > 0) {
			sg_chain(sg_table[i - 1].sg_table,
				 max_sg_per_table + 1, sg);
		}

#endif /* CONFIG_ARCH_HAS_SG_CHAIN */

		sg_table[i].sg_table = sg;
		sg_table[i].rd_sg_count = sg_per_table;
		sg_table[i].page_start_offset = page_offset;
		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
						- 1;

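		/*
		 * Back each entry with a freshly allocated page and fill it
		 * with init_payload (0x00 for data space, 0xff for
		 * protection space).
		 */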
		for (j = 0; j < sg_per_table; j++) {
			pg = alloc_pages(GFP_KERNEL, 0);
			if (!pg) {
				pr_err("Unable to allocate scatterlist"
					" pages for struct rd_dev_sg_table\n");
				return -ENOMEM;
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;

			p = kmap(pg);
			memset(p, init_payload, PAGE_SIZE);
			kunmap(pg);
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

	return 0;
}

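/*
 * Allocate the ramdisk's backing store: one zero-filled page per
 * configured rd_page, tracked across as many scatterlist tables as
 * needed.
 */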
static int rd_build_device_space(struct rd_dev *rd_dev)
{
	struct rd_dev_sg_table *sg_table;
	u32 sg_tables, total_sg_needed;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_page_count <= 0) {
		pr_err("Illegal page count: %u for Ramdisk device\n",
		       rd_dev->rd_page_count);
		return -EINVAL;
	}

	/* Don't need backing pages for NULLIO */
	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;

	total_sg_needed = rd_dev->rd_page_count;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kcalloc(sg_tables, sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk"
		       " scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		 rd_dev->rd_dev_id, rd_dev->rd_page_count,
		 rd_dev->sg_table_count);

	return 0;
}

static void rd_release_prot_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
					  rd_dev->sg_prot_count);

	pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
		 " Device ID: %u, pages %u in %u tables total bytes %lu\n",
		 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		 rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_prot_array = NULL;
	rd_dev->sg_prot_count = 0;
}

static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
{
	struct rd_dev_sg_table *sg_table;
	u32 total_sg_needed, sg_tables;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;
	/*
	 * prot_length is the number of DIF bytes carried per logical block
	 * (8 bytes for the standard DIF tuple).  Each backing page holds
	 * PAGE_SIZE / block_size blocks, each of which needs prot_length
	 * bytes of protection, so the protection space spans
	 * rd_page_count * (PAGE_SIZE / block_size) * prot_length / PAGE_SIZE
	 * pages; the PAGE_SIZE factors cancel.  One extra page pads the
	 * integer division.  For example: 4 KiB pages, 512-byte blocks and
	 * 8 DIF bytes per block give 64 protection bytes per data page,
	 * i.e. one protection page per 64 data pages.
	 */
	total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kcalloc(sg_tables, sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk protection"
		       " scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_prot_array = sg_table;
	rd_dev->sg_prot_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		 rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);

	return 0;
}

static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
	struct rd_dev *rd_dev;
	struct rd_host *rd_host = hba->hba_ptr;

	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
	if (!rd_dev) {
		pr_err("Unable to allocate memory for struct rd_dev\n");
		return NULL;
	}

	rd_dev->rd_host = rd_host;

	return &rd_dev->dev;
}

static int rd_configure_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	struct rd_host *rd_host = dev->se_hba->hba_ptr;
	int ret;

	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
		pr_debug("Missing rd_pages= parameter\n");
		return -EINVAL;
	}

	ret = rd_build_device_space(rd_dev);
	if (ret < 0)
		goto fail;

	dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
	dev->dev_attrib.hw_max_sectors = UINT_MAX;
	dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;

	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;

	pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
		" %u pages in %u tables, %lu total bytes\n",
		rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count,
		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

	return 0;

fail:
	rd_release_device_space(rd_dev);
	return ret;
}

static void rd_free_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_device_space(rd_dev);
	kfree(rd_dev);
}

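/*
 * Look up the scatterlist table covering a given backing-store page.
 * Every table spans a fixed number of pages, so the table index is a
 * simple division, then verified against the recorded page range.
 */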
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_table_count) {
		sg_table = &rd_dev->sg_table_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}

static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_prot_count) {
		sg_table = &rd_dev->sg_prot_array[i];
		if ((sg_table->page_start_offset <= page) &&
		     (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}

typedef sense_reason_t (*dif_verify)(struct se_cmd *, sector_t, unsigned int,
				     unsigned int, struct scatterlist *, int);

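/*
 * Run the supplied DIF verify routine (the read or write flavour of
 * sbc_dif_verify_*) against the protection scatterlists covering this
 * command's LBA range.
 */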
static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, dif_verify dif_verify)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *prot_table;
	bool need_to_release = false;
	struct scatterlist *prot_sg;
	u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
	u32 prot_offset, prot_page;
	u32 prot_npages __maybe_unused;
	u64 tmp;
	sense_reason_t rc = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

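	/*
	 * Convert the starting LBA into a byte offset within the linear
	 * protection space, then split it into a protection page index
	 * plus an offset inside that page.
	 */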
	tmp = cmd->t_task_lba * se_dev->prot_length;
	prot_offset = do_div(tmp, PAGE_SIZE);
	prot_page = tmp;

	prot_table = rd_get_prot_table(dev, prot_page);
	if (!prot_table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	prot_sg = &prot_table->sg_table[prot_page -
					prot_table->page_start_offset];

#ifndef CONFIG_ARCH_HAS_SG_CHAIN

	prot_npages = DIV_ROUND_UP(prot_offset + sectors * se_dev->prot_length,
				   PAGE_SIZE);

	/*
	 * Without arch SG-chain support the verify routine needs one flat
	 * scatterlist, so allocate a temporary contiguous copy of the
	 * entries when the protection pages straddle multiple tables.
	 */
	if (prot_table->page_end_offset < prot_page + prot_npages - 1) {
		int i;

		prot_sg = kcalloc(prot_npages, sizeof(*prot_sg), GFP_KERNEL);
		if (!prot_sg)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		need_to_release = true;
		sg_init_table(prot_sg, prot_npages);

		for (i = 0; i < prot_npages; i++) {
			if (prot_page + i > prot_table->page_end_offset) {
				prot_table = rd_get_prot_table(dev,
								prot_page + i);
				if (!prot_table) {
					kfree(prot_sg);
					return rc;
				}
				sg_unmark_end(&prot_sg[i - 1]);
			}
			prot_sg[i] = prot_table->sg_table[prot_page + i -
						prot_table->page_start_offset];
		}
	}

#endif /* !CONFIG_ARCH_HAS_SG_CHAIN */

	rc = dif_verify(cmd, cmd->t_task_lba, sectors, 0, prot_sg, prot_offset);
	if (need_to_release)
		kfree(prot_sg);
472 473 474 475

	return rc;
}

static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *table;
	struct scatterlist *rd_sg;
	struct sg_mapping_iter m;
	u32 rd_offset;
	u32 rd_size;
	u32 rd_page;
	u32 src_len;
	u64 tmp;
	sense_reason_t rc;

	if (dev->rd_flags & RDF_NULLIO) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

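	/*
	 * Map the starting LBA to a backing-store page plus an intra-page
	 * offset; do_div() leaves the quotient (the page index) in tmp and
	 * returns the remainder.
	 */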
	tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
	rd_offset = do_div(tmp, PAGE_SIZE);
	rd_page = tmp;
	rd_size = cmd->data_length;

	table = rd_get_sg_table(dev, rd_page);
	if (!table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	rd_sg = &table->sg_table[rd_page - table->page_start_offset];

	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
			dev->rd_dev_id,
			data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
			cmd->t_task_lba, rd_size, rd_page, rd_offset);

	if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
	    data_direction == DMA_TO_DEVICE) {
		rc = rd_do_prot_rw(cmd, sbc_dif_verify_write);
		if (rc)
			return rc;
	}

	src_len = PAGE_SIZE - rd_offset;
	sg_miter_start(&m, sgl, sgl_nents,
			data_direction == DMA_FROM_DEVICE ?
				SG_MITER_TO_SG : SG_MITER_FROM_SG);
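	/*
	 * Copy between the command's scatterlist and the ramdisk pages,
	 * advancing through both in lockstep and hopping to the next
	 * scatterlist table whenever the current one is exhausted.
	 */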
	while (rd_size) {
		u32 len;
		void *rd_addr;

		sg_miter_next(&m);
		if (!(u32)m.length) {
			pr_debug("RD[%u]: invalid sgl %p len %zu\n",
				 dev->rd_dev_id, m.addr, m.length);
			sg_miter_stop(&m);
			return TCM_INCORRECT_AMOUNT_OF_DATA;
		}
		len = min((u32)m.length, src_len);
		if (len > rd_size) {
			pr_debug("RD[%u]: size underrun page %d offset %d "
				 "size %d\n", dev->rd_dev_id,
				 rd_page, rd_offset, rd_size);
			len = rd_size;
		}
		m.consumed = len;

		rd_addr = sg_virt(rd_sg) + rd_offset;

		if (data_direction == DMA_FROM_DEVICE)
			memcpy(m.addr, rd_addr, len);
		else
			memcpy(rd_addr, m.addr, len);

		rd_size -= len;
		if (!rd_size)
			continue;

		src_len -= len;
		if (src_len) {
			rd_offset += len;
			continue;
		}

		/* rd page completed, next one please */
		rd_page++;
		rd_offset = 0;
		src_len = PAGE_SIZE;
		if (rd_page <= table->page_end_offset) {
			rd_sg++;
			continue;
		}

		table = rd_get_sg_table(dev, rd_page);
		if (!table) {
			sg_miter_stop(&m);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		/* since we increment, the first sg entry is correct */
		rd_sg = table->sg_table;
	}
	sg_miter_stop(&m);

	if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
	    data_direction == DMA_FROM_DEVICE) {
		rc = rd_do_prot_rw(cmd, sbc_dif_verify_read);
		if (rc)
			return rc;
	}

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

enum {
	Opt_rd_pages, Opt_rd_nullio, Opt_err
};

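/* Recognized configfs parameters: rd_pages=<count>, rd_nullio=<0|1>. */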
static match_table_t tokens = {
	{Opt_rd_pages, "rd_pages=%d"},
	{Opt_rd_nullio, "rd_nullio=%d"},
	{Opt_err, NULL}
};

static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_rd_pages:
			match_int(args, &arg);
			rd_dev->rd_page_count = arg;
			pr_debug("RAMDISK: Referencing Page"
				" Count: %u\n", rd_dev->rd_page_count);
			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
			break;
		case Opt_rd_nullio:
			match_int(args, &arg);
			if (arg != 1)
				break;

			pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
			rd_dev->rd_flags |= RDF_NULLIO;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: rd_mcp\n",
			rd_dev->rd_dev_id);
	bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
			"  SG_table_count: %u  nullio: %d\n", rd_dev->rd_page_count,
			PAGE_SIZE, rd_dev->sg_table_count,
			!!(rd_dev->rd_flags & RDF_NULLIO));
	return bl;
}

static sector_t rd_get_blocks(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
			dev->dev_attrib.block_size) - 1;

	return blocks_long;
}

static int rd_init_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	if (!dev->dev_attrib.pi_prot_type)
		return 0;

	return rd_build_prot_space(rd_dev, dev->prot_length,
				   dev->dev_attrib.block_size);
}

static void rd_free_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_prot_space(rd_dev);
}

static struct sbc_ops rd_sbc_ops = {
	.execute_rw		= rd_execute_rw,
};

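/*
 * All CDB decoding is delegated to the common SBC layer; rd_sbc_ops only
 * needs to supply the memcpy-based I/O path above.
 */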
static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &rd_sbc_ops);
}

DEF_TB_DEFAULT_ATTRIBS(rd_mcp);

static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = {
	&rd_mcp_dev_attrib_emulate_model_alias.attr,
	&rd_mcp_dev_attrib_emulate_dpo.attr,
	&rd_mcp_dev_attrib_emulate_fua_write.attr,
	&rd_mcp_dev_attrib_emulate_fua_read.attr,
	&rd_mcp_dev_attrib_emulate_write_cache.attr,
	&rd_mcp_dev_attrib_emulate_ua_intlck_ctrl.attr,
	&rd_mcp_dev_attrib_emulate_tas.attr,
	&rd_mcp_dev_attrib_emulate_tpu.attr,
	&rd_mcp_dev_attrib_emulate_tpws.attr,
	&rd_mcp_dev_attrib_emulate_caw.attr,
	&rd_mcp_dev_attrib_emulate_3pc.attr,
	&rd_mcp_dev_attrib_pi_prot_type.attr,
	&rd_mcp_dev_attrib_hw_pi_prot_type.attr,
	&rd_mcp_dev_attrib_pi_prot_format.attr,
	&rd_mcp_dev_attrib_enforce_pr_isids.attr,
	&rd_mcp_dev_attrib_is_nonrot.attr,
	&rd_mcp_dev_attrib_emulate_rest_reord.attr,
	&rd_mcp_dev_attrib_force_pr_aptpl.attr,
	&rd_mcp_dev_attrib_hw_block_size.attr,
	&rd_mcp_dev_attrib_block_size.attr,
	&rd_mcp_dev_attrib_hw_max_sectors.attr,
	&rd_mcp_dev_attrib_optimal_sectors.attr,
	&rd_mcp_dev_attrib_hw_queue_depth.attr,
	&rd_mcp_dev_attrib_queue_depth.attr,
	&rd_mcp_dev_attrib_max_unmap_lba_count.attr,
	&rd_mcp_dev_attrib_max_unmap_block_desc_count.attr,
	&rd_mcp_dev_attrib_unmap_granularity.attr,
	&rd_mcp_dev_attrib_unmap_granularity_alignment.attr,
	&rd_mcp_dev_attrib_max_write_same_len.attr,
	NULL,
};

static struct se_subsystem_api rd_mcp_template = {
	.name			= "rd_mcp",
	.inquiry_prod		= "RAMDISK-MCP",
	.inquiry_rev		= RD_MCP_VERSION,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.alloc_device		= rd_alloc_device,
	.configure_device	= rd_configure_device,
	.free_device		= rd_free_device,
	.parse_cdb		= rd_parse_cdb,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= rd_get_blocks,
	.init_prot		= rd_init_prot,
	.free_prot		= rd_free_prot,
};

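/*
 * Module init wires the default target-backend configfs attributes into
 * the template before registering it with the transport subsystem.
 */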
int __init rd_module_init(void)
{
	struct target_backend_cits *tbc = &rd_mcp_template.tb_cits;
	int ret;

	target_core_setup_sub_cits(&rd_mcp_template);
	tbc->tb_dev_attrib_cit.ct_attrs = rd_mcp_backend_dev_attrs;

	ret = transport_subsystem_register(&rd_mcp_template);
	if (ret < 0)
		return ret;

	return 0;
}

void rd_module_exit(void)
{
	transport_subsystem_release(&rd_mcp_template);
}