/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_rd.h"

static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
	return container_of(dev, struct rd_dev, dev);
}

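/*
 * rd_attach_hba(): allocate the per-HBA rd_host structure and hang it off
 * the generic target core HBA.
 */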
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct rd_host *rd_host;

	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
	if (!rd_host) {
		pr_err("Unable to allocate memory for struct rd_host\n");
		return -ENOMEM;
	}

	rd_host->rd_host_id = host_id;

	hba->hba_ptr = rd_host;

	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);

	return 0;
}

static void rd_detach_hba(struct se_hba *hba)
{
	struct rd_host *rd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

	kfree(rd_host);
	hba->hba_ptr = NULL;
}

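/*
 * Free every backing page referenced by an array of rd_dev_sg_table entries,
 * then free the scatterlists and the table array itself. Returns the number
 * of pages released so callers can log it.
 */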
static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				 u32 sg_table_count)
{
	struct page *pg;
	struct scatterlist *sg;
	u32 i, j, page_count = 0, sg_per_table;

	for (i = 0; i < sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

		for (j = 0; j < sg_per_table; j++) {
			pg = sg_page(&sg[j]);
			if (pg) {
				__free_page(pg);
				page_count++;
			}
		}
		kfree(sg);
	}

	kfree(sg_table);
	return page_count;
}

static void rd_release_device_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
					  rd_dev->sg_table_count);

	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
}


/*	rd_allocate_sgl_table():
 *
 *	Allocate scatterlist arrays and zero-order backing pages for one
 *	ramdisk area, chaining tables together when CONFIG_ARCH_HAS_SG_CHAIN
 *	is set, and fill every page with init_payload.
 */
static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				 u32 total_sg_needed, unsigned char init_payload)
{
	u32 i = 0, j, page_offset = 0, sg_per_table;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	struct page *pg;
	struct scatterlist *sg;
	unsigned char *p;

	while (total_sg_needed) {
		unsigned int chain_entry = 0;

		sg_per_table = (total_sg_needed > max_sg_per_table) ?
			max_sg_per_table : total_sg_needed;

#ifdef CONFIG_ARCH_HAS_SG_CHAIN

		/*
		 * Reserve extra element for chain entry
		 */
		if (sg_per_table < total_sg_needed)
			chain_entry = 1;

#endif /* CONFIG_ARCH_HAS_SG_CHAIN */

		sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
				GFP_KERNEL);
		if (!sg) {
			pr_err("Unable to allocate scatterlist array"
				" for struct rd_dev\n");
			return -ENOMEM;
		}

		sg_init_table(sg, sg_per_table + chain_entry);

#ifdef CONFIG_ARCH_HAS_SG_CHAIN

		if (i > 0) {
			sg_chain(sg_table[i - 1].sg_table,
				 max_sg_per_table + 1, sg);
		}

#endif /* CONFIG_ARCH_HAS_SG_CHAIN */

		sg_table[i].sg_table = sg;
		sg_table[i].rd_sg_count = sg_per_table;
		sg_table[i].page_start_offset = page_offset;
		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
						- 1;

		for (j = 0; j < sg_per_table; j++) {
			pg = alloc_pages(GFP_KERNEL, 0);
			if (!pg) {
				pr_err("Unable to allocate scatterlist"
					" pages for struct rd_dev_sg_table\n");
				return -ENOMEM;
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;

			p = kmap(pg);
			memset(p, init_payload, PAGE_SIZE);
			kunmap(pg);
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

	return 0;
}

static int rd_build_device_space(struct rd_dev *rd_dev)
{
	struct rd_dev_sg_table *sg_table;
	u32 sg_tables, total_sg_needed;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_page_count <= 0) {
		pr_err("Illegal page count: %u for Ramdisk device\n",
		       rd_dev->rd_page_count);
		return -EINVAL;
	}

	/* Don't need backing pages for NULLIO */
	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;

	total_sg_needed = rd_dev->rd_page_count;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk"
		       " scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		 rd_dev->rd_dev_id, rd_dev->rd_page_count,
		 rd_dev->sg_table_count);

	return 0;
}

static void rd_release_prot_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
					  rd_dev->sg_prot_count);

	pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
		 " Device ID: %u, pages %u in %u tables total bytes %lu\n",
		 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		 rd_dev->sg_prot_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_prot_array = NULL;
	rd_dev->sg_prot_count = 0;
}

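/*
 * Allocate the scatterlist tables that back the T10 protection information
 * for the ramdisk and initialize those pages to 0xff.
 */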
static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
{
	struct rd_dev_sg_table *sg_table;
	u32 total_sg_needed, sg_tables;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;
	/*
	 * Each logical block carries prot_length bytes of protection
	 * information (8 bytes of DIF data), so the total PI size is
	 * rd_page_count * (PAGE_SIZE / block_size) * prot_length bytes.
	 * Converting to pages, the PAGE_SIZE factors cancel, leaving
	 * rd_page_count * prot_length / block_size, plus one page of padding.
	 */
	total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;
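	/*
	 * Example with illustrative values: 4K pages, 512-byte blocks and
	 * prot_length = 8 give 8 blocks * 8 bytes = 64 bytes of PI per data
	 * page, i.e. one protection page per 64 data pages, matching
	 * rd_page_count * 8 / 512 above.
	 */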

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk protection"
		       " scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_prot_array = sg_table;
	rd_dev->sg_prot_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		 rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);

	return 0;
}

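/*
 * Allocate a new rd_dev, link it to its rd_host, and return the embedded
 * se_device to the target core.
 */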
static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
	struct rd_dev *rd_dev;
	struct rd_host *rd_host = hba->hba_ptr;

	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
	if (!rd_dev) {
		pr_err("Unable to allocate memory for struct rd_dev\n");
		return NULL;
	}

	rd_dev->rd_host = rd_host;

	return &rd_dev->dev;
}

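/*
 * Validate that rd_pages= was supplied, allocate the backing device space,
 * and publish the hardware attribute defaults for this ramdisk.
 */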
static int rd_configure_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	struct rd_host *rd_host = dev->se_hba->hba_ptr;
	int ret;

	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
		pr_debug("Missing rd_pages= parameter\n");
		return -EINVAL;
	}

	ret = rd_build_device_space(rd_dev);
	if (ret < 0)
		goto fail;

	dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
	dev->dev_attrib.hw_max_sectors = UINT_MAX;
	dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;

	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;

	pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
		" %u pages in %u tables, %lu total bytes\n",
		rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count,
		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

	return 0;

fail:
	rd_release_device_space(rd_dev);
	return ret;
}

static void rd_free_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_device_space(rd_dev);
	kfree(rd_dev);
}

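/*
 * Map a zero-based backing page index to the rd_dev_sg_table chunk that
 * contains it.
 */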
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_table_count) {
		sg_table = &rd_dev->sg_table_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}

static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_prot_count) {
		sg_table = &rd_dev->sg_prot_array[i];
		if ((sg_table->page_start_offset <= page) &&
		     (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}

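/*
 * Verify T10 PI for the I/O and copy it between the command's protection
 * scatterlist and the ramdisk's protection backing pages.
 */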
static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *prot_table;
	bool need_to_release = false;
	struct scatterlist *prot_sg;
	u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
	u32 prot_offset, prot_page;
	u32 prot_npages __maybe_unused;
	u64 tmp;
	sense_reason_t rc = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

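	/*
	 * Locate the protection page and byte offset for the starting LBA;
	 * each logical block contributes prot_length bytes of PI.
	 */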
	tmp = cmd->t_task_lba * se_dev->prot_length;
	prot_offset = do_div(tmp, PAGE_SIZE);
	prot_page = tmp;

	prot_table = rd_get_prot_table(dev, prot_page);
	if (!prot_table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	prot_sg = &prot_table->sg_table[prot_page -
					prot_table->page_start_offset];

#ifndef CONFIG_ARCH_HAS_SG_CHAIN

	prot_npages = DIV_ROUND_UP(prot_offset + sectors * se_dev->prot_length,
				   PAGE_SIZE);

	/*
	 * Allocate temporary contiguous scatterlist entries when the
	 * protection pages straddle multiple scatterlist tables.
	 */
	if (prot_table->page_end_offset < prot_page + prot_npages - 1) {
		int i;

		prot_sg = kcalloc(prot_npages, sizeof(*prot_sg), GFP_KERNEL);
		if (!prot_sg)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		need_to_release = true;
		sg_init_table(prot_sg, prot_npages);

		for (i = 0; i < prot_npages; i++) {
			if (prot_page + i > prot_table->page_end_offset) {
				prot_table = rd_get_prot_table(dev,
								prot_page + i);
				if (!prot_table) {
					kfree(prot_sg);
					return rc;
				}
				sg_unmark_end(&prot_sg[i - 1]);
			}
			prot_sg[i] = prot_table->sg_table[prot_page + i -
						prot_table->page_start_offset];
		}
	}

#endif /* !CONFIG_ARCH_HAS_SG_CHAIN */

	if (is_read)
		rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
				    prot_sg, prot_offset);
	else
		rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
				    cmd->t_prot_sg, 0);

	if (!rc)
		sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset);

	if (need_to_release)
		kfree(prot_sg);

	return rc;
}

static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *table;
	struct scatterlist *rd_sg;
	struct sg_mapping_iter m;
	u32 rd_offset;
	u32 rd_size;
	u32 rd_page;
	u32 src_len;
	u64 tmp;
	sense_reason_t rc;

	if (dev->rd_flags & RDF_NULLIO) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

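	/*
	 * Convert the starting LBA into a backing page index and a byte
	 * offset within that page.
	 */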
	tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
	rd_offset = do_div(tmp, PAGE_SIZE);
	rd_page = tmp;
	rd_size = cmd->data_length;

	table = rd_get_sg_table(dev, rd_page);
	if (!table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	rd_sg = &table->sg_table[rd_page - table->page_start_offset];

	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
			dev->rd_dev_id,
			data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
			cmd->t_task_lba, rd_size, rd_page, rd_offset);

	if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
	    data_direction == DMA_TO_DEVICE) {
		rc = rd_do_prot_rw(cmd, false);
		if (rc)
			return rc;
	}

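	/*
	 * Walk the command's data scatterlist with a mapping iterator and
	 * memcpy to or from the ramdisk backing pages one chunk at a time.
	 */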
	src_len = PAGE_SIZE - rd_offset;
	sg_miter_start(&m, sgl, sgl_nents,
			data_direction == DMA_FROM_DEVICE ?
				SG_MITER_TO_SG : SG_MITER_FROM_SG);
	while (rd_size) {
		u32 len;
		void *rd_addr;

		sg_miter_next(&m);
		if (!(u32)m.length) {
			pr_debug("RD[%u]: invalid sgl %p len %zu\n",
				 dev->rd_dev_id, m.addr, m.length);
			sg_miter_stop(&m);
			return TCM_INCORRECT_AMOUNT_OF_DATA;
		}
		len = min((u32)m.length, src_len);
		if (len > rd_size) {
			pr_debug("RD[%u]: size underrun page %d offset %d "
				 "size %d\n", dev->rd_dev_id,
				 rd_page, rd_offset, rd_size);
			len = rd_size;
		}
		m.consumed = len;

		rd_addr = sg_virt(rd_sg) + rd_offset;

		if (data_direction == DMA_FROM_DEVICE)
			memcpy(m.addr, rd_addr, len);
		else
			memcpy(rd_addr, m.addr, len);

		rd_size -= len;
		if (!rd_size)
			continue;

		src_len -= len;
		if (src_len) {
			rd_offset += len;
			continue;
		}

		/* rd page completed, next one please */
		rd_page++;
		rd_offset = 0;
		src_len = PAGE_SIZE;
		if (rd_page <= table->page_end_offset) {
			rd_sg++;
			continue;
		}

		table = rd_get_sg_table(dev, rd_page);
		if (!table) {
			sg_miter_stop(&m);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		/* since we increment, the first sg entry is correct */
		rd_sg = table->sg_table;
	}
	sg_miter_stop(&m);

	if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
	    data_direction == DMA_FROM_DEVICE) {
		rc = rd_do_prot_rw(cmd, true);
		if (rc)
			return rc;
	}

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

enum {
	Opt_rd_pages, Opt_rd_nullio, Opt_err
};

static match_table_t tokens = {
	{Opt_rd_pages, "rd_pages=%d"},
	{Opt_rd_nullio, "rd_nullio=%d"},
	{Opt_err, NULL}
};

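/*
 * Parse the configfs device parameter string, accepting rd_pages=<count>
 * and rd_nullio=1.
 */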
static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_rd_pages:
			match_int(args, &arg);
			rd_dev->rd_page_count = arg;
			pr_debug("RAMDISK: Referencing Page"
				" Count: %u\n", rd_dev->rd_page_count);
			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
			break;
		case Opt_rd_nullio:
			match_int(args, &arg);
			if (arg != 1)
				break;

			pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
			rd_dev->rd_flags |= RDF_NULLIO;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: rd_mcp\n",
			rd_dev->rd_dev_id);
	bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
			"  SG_table_count: %u  nullio: %d\n", rd_dev->rd_page_count,
			PAGE_SIZE, rd_dev->sg_table_count,
			!!(rd_dev->rd_flags & RDF_NULLIO));
	return bl;
}

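/*
 * Report the last addressable LBA: total ramdisk bytes divided by the
 * logical block size, minus one.
 */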
static sector_t rd_get_blocks(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
			dev->dev_attrib.block_size) - 1;

	return blocks_long;
}

static int rd_init_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	if (!dev->dev_attrib.pi_prot_type)
		return 0;

	return rd_build_prot_space(rd_dev, dev->prot_length,
				   dev->dev_attrib.block_size);
}

static void rd_free_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_prot_space(rd_dev);
}

static struct sbc_ops rd_sbc_ops = {
	.execute_rw		= rd_execute_rw,
};

static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &rd_sbc_ops);
}

static const struct target_backend_ops rd_mcp_ops = {
	.name			= "rd_mcp",
	.inquiry_prod		= "RAMDISK-MCP",
	.inquiry_rev		= RD_MCP_VERSION,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.alloc_device		= rd_alloc_device,
	.configure_device	= rd_configure_device,
	.free_device		= rd_free_device,
	.parse_cdb		= rd_parse_cdb,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= rd_get_blocks,
	.init_prot		= rd_init_prot,
	.free_prot		= rd_free_prot,
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};

int __init rd_module_init(void)
{
	return transport_backend_register(&rd_mcp_ops);
}

void rd_module_exit(void)
{
	target_backend_unregister(&rd_mcp_ops);
}