/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_rd.h"

static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
	return container_of(dev, struct rd_dev, dev);
}

/*	rd_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *	Allocate and initialize the struct rd_host backing this HBA and
 *	attach it to the generic target core se_hba.
 */
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct rd_host *rd_host;

	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
	if (!rd_host) {
		pr_err("Unable to allocate memory for struct rd_host\n");
		return -ENOMEM;
	}

	rd_host->rd_host_id = host_id;

	hba->hba_ptr = rd_host;

	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);

	return 0;
}

static void rd_detach_hba(struct se_hba *hba)
{
	struct rd_host *rd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

	kfree(rd_host);
	hba->hba_ptr = NULL;
}

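/*	rd_release_sgl_table():
 *
 *	Free every backing page and scatterlist referenced by an array of
 *	rd_dev_sg_table entries, then free the array itself.  Returns the
 *	number of pages released, which callers report via pr_debug().
 */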
static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				 u32 sg_table_count)
{
	struct page *pg;
	struct scatterlist *sg;
	u32 i, j, page_count = 0, sg_per_table;

	for (i = 0; i < sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

		for (j = 0; j < sg_per_table; j++) {
			pg = sg_page(&sg[j]);
			if (pg) {
				__free_page(pg);
				page_count++;
			}
		}
		kfree(sg);
	}

	kfree(sg_table);
	return page_count;
}

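/*	rd_release_device_space():
 *
 *	Release all data pages and scatterlist tables backing the ramdisk
 *	device, if any were allocated.
 */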
static void rd_release_device_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
					  rd_dev->sg_table_count);

	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
}


/*	rd_allocate_sgl_table():
 *
 *	Allocate the scatterlists and backing pages for each rd_dev_sg_table
 *	entry, filling every newly allocated page with init_payload.
 */
static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				 u32 total_sg_needed, unsigned char init_payload)
{
	u32 i = 0, j, page_offset = 0, sg_per_table;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	struct page *pg;
	struct scatterlist *sg;
	unsigned char *p;

	while (total_sg_needed) {
		sg_per_table = (total_sg_needed > max_sg_per_table) ?
			max_sg_per_table : total_sg_needed;

		sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
				GFP_KERNEL);
		if (!sg) {
			pr_err("Unable to allocate scatterlist array"
				" for struct rd_dev\n");
			return -ENOMEM;
		}

		sg_init_table(sg, sg_per_table);

		sg_table[i].sg_table = sg;
		sg_table[i].rd_sg_count = sg_per_table;
		sg_table[i].page_start_offset = page_offset;
		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
						- 1;

		for (j = 0; j < sg_per_table; j++) {
			pg = alloc_pages(GFP_KERNEL, 0);
			if (!pg) {
				pr_err("Unable to allocate scatterlist"
					" pages for struct rd_dev_sg_table\n");
				return -ENOMEM;
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;

			p = kmap(pg);
			memset(p, init_payload, PAGE_SIZE);
			kunmap(pg);
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

	return 0;
}

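/*	rd_build_device_space():
 *
 *	Allocate the array of rd_dev_sg_table entries sized for
 *	rd_page_count pages and populate it with zero-filled backing pages.
 *	NULLIO devices skip the allocation entirely.
 */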
static int rd_build_device_space(struct rd_dev *rd_dev)
{
	struct rd_dev_sg_table *sg_table;
	u32 sg_tables, total_sg_needed;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_page_count <= 0) {
		pr_err("Illegal page count: %u for Ramdisk device\n",
		       rd_dev->rd_page_count);
		return -EINVAL;
	}

	/* Don't need backing pages for NULLIO */
	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;

	total_sg_needed = rd_dev->rd_page_count;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk"
		       " scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		 rd_dev->rd_dev_id, rd_dev->rd_page_count,
		 rd_dev->sg_table_count);

	return 0;
}

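/*	rd_release_prot_space():
 *
 *	Release the protection (DIF) pages and scatterlist tables, if any
 *	were allocated for this device.
 */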
static void rd_release_prot_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
					  rd_dev->sg_prot_count);

	pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
		 " Device ID: %u, pages %u in %u tables total bytes %lu\n",
		 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		 rd_dev->sg_prot_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_prot_array = NULL;
	rd_dev->sg_prot_count = 0;
}

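/*	rd_build_prot_space():
 *
 *	Allocate scatterlist tables for protection (DIF) metadata, sized
 *	from the data page count and the device prot_length, with the
 *	backing pages initialized to 0xff.
 */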
static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length)
{
	struct rd_dev_sg_table *sg_table;
	u32 total_sg_needed, sg_tables;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;

	total_sg_needed = rd_dev->rd_page_count / prot_length;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk protection"
		       " scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_prot_array = sg_table;
	rd_dev->sg_prot_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		 rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);

	return 0;
}

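/*	rd_alloc_device():
 *
 *	Allocate the rd_dev for a new ramdisk backend instance.  Backing
 *	pages are not allocated until rd_configure_device().
 */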
static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
	struct rd_dev *rd_dev;
	struct rd_host *rd_host = hba->hba_ptr;

	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
	if (!rd_dev) {
		pr_err("Unable to allocate memory for struct rd_dev\n");
		return NULL;
	}

	rd_dev->rd_host = rd_host;

	return &rd_dev->dev;
}

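/*	rd_configure_device():
 *
 *	Require the rd_pages= parameter, build the backing page space and
 *	publish the hardware attributes to the target core.
 */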
static int rd_configure_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	struct rd_host *rd_host = dev->se_hba->hba_ptr;
	int ret;

	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
		pr_debug("Missing rd_pages= parameter\n");
		return -EINVAL;
	}

	ret = rd_build_device_space(rd_dev);
	if (ret < 0)
		goto fail;

	dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
	dev->dev_attrib.hw_max_sectors = UINT_MAX;
	dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;

	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;

	pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
		" %u pages in %u tables, %lu total bytes\n",
		rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count,
		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

	return 0;

fail:
	rd_release_device_space(rd_dev);
	return ret;
}

static void rd_free_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_device_space(rd_dev);
	kfree(rd_dev);
}

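/*	rd_get_sg_table():
 *
 *	Look up the rd_dev_sg_table whose page range covers @page in the
 *	data sg_table_array.
 */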
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_table_count) {
		sg_table = &rd_dev->sg_table_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}

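/*	rd_get_prot_table():
 *
 *	Look up the rd_dev_sg_table whose page range covers @page in the
 *	protection sg_prot_array.
 */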
static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_prot_count) {
		sg_table = &rd_dev->sg_prot_array[i];
		if ((sg_table->page_start_offset <= page) &&
		     (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}

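/*	rd_execute_rw():
 *
 *	memcpy() the payload between the command's scatterlist and the
 *	ramdisk backing pages, walking both with a sg_mapping_iter, and
 *	run DIF verification when the command carries protection info.
 */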
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *table;
	struct scatterlist *rd_sg;
	struct sg_mapping_iter m;
	u32 rd_offset;
	u32 rd_size;
	u32 rd_page;
	u32 src_len;
	u64 tmp;
	sense_reason_t rc;

	if (dev->rd_flags & RDF_NULLIO) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
	rd_offset = do_div(tmp, PAGE_SIZE);
	rd_page = tmp;
	rd_size = cmd->data_length;

	table = rd_get_sg_table(dev, rd_page);
	if (!table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	rd_sg = &table->sg_table[rd_page - table->page_start_offset];

	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
			dev->rd_dev_id,
			data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
			cmd->t_task_lba, rd_size, rd_page, rd_offset);

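	/*
	 * For WRITEs carrying protection information, verify the DIF tags
	 * against the protection scatterlists before the payload is copied
	 * into the ramdisk backing pages.
	 */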
	if (cmd->prot_type && data_direction == DMA_TO_DEVICE) {
		struct rd_dev_sg_table *prot_table;
		struct scatterlist *prot_sg;
		u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
		u32 prot_offset, prot_page;

		tmp = cmd->t_task_lba * se_dev->prot_length;
		prot_offset = do_div(tmp, PAGE_SIZE);
		prot_page = tmp;

		prot_table = rd_get_prot_table(dev, prot_page);
		if (!prot_table)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];

		rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, 0,
					  prot_sg, prot_offset);
		if (rc)
			return rc;
	}

	src_len = PAGE_SIZE - rd_offset;
	sg_miter_start(&m, sgl, sgl_nents,
			data_direction == DMA_FROM_DEVICE ?
				SG_MITER_TO_SG : SG_MITER_FROM_SG);
	while (rd_size) {
		u32 len;
		void *rd_addr;

		sg_miter_next(&m);
		if (!(u32)m.length) {
			pr_debug("RD[%u]: invalid sgl %p len %zu\n",
				 dev->rd_dev_id, m.addr, m.length);
			sg_miter_stop(&m);
			return TCM_INCORRECT_AMOUNT_OF_DATA;
		}
		len = min((u32)m.length, src_len);
		if (len > rd_size) {
			pr_debug("RD[%u]: size underrun page %d offset %d "
				 "size %d\n", dev->rd_dev_id,
				 rd_page, rd_offset, rd_size);
			len = rd_size;
		}
		m.consumed = len;

		rd_addr = sg_virt(rd_sg) + rd_offset;

		if (data_direction == DMA_FROM_DEVICE)
			memcpy(m.addr, rd_addr, len);
		else
			memcpy(rd_addr, m.addr, len);

		rd_size -= len;
		if (!rd_size)
			continue;

		src_len -= len;
		if (src_len) {
			rd_offset += len;
			continue;
		}

		/* rd page completed, next one please */
		rd_page++;
		rd_offset = 0;
		src_len = PAGE_SIZE;
		if (rd_page <= table->page_end_offset) {
			rd_sg++;
			continue;
		}

		table = rd_get_sg_table(dev, rd_page);
		if (!table) {
			sg_miter_stop(&m);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		/* since we increment, the first sg entry is correct */
		rd_sg = table->sg_table;
	}
	sg_miter_stop(&m);

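	/*
	 * For READs carrying protection information, verify the DIF tags
	 * now that the payload has been copied out of the ramdisk backing
	 * pages.
	 */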
	if (cmd->prot_type && data_direction == DMA_FROM_DEVICE) {
		struct rd_dev_sg_table *prot_table;
		struct scatterlist *prot_sg;
		u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
		u32 prot_offset, prot_page;

		tmp = cmd->t_task_lba * se_dev->prot_length;
		prot_offset = do_div(tmp, PAGE_SIZE);
		prot_page = tmp;

		prot_table = rd_get_prot_table(dev, prot_page);
		if (!prot_table)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];

		rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
					 prot_sg, prot_offset);
		if (rc)
			return rc;
	}

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

enum {
	Opt_rd_pages, Opt_rd_nullio, Opt_err
};

static match_table_t tokens = {
	{Opt_rd_pages, "rd_pages=%d"},
	{Opt_rd_nullio, "rd_nullio=%d"},
	{Opt_err, NULL}
};

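/*	rd_set_configfs_dev_params():
 *
 *	Parse the rd_pages= and rd_nullio= configfs parameters for this
 *	ramdisk device.
 */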
static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_rd_pages:
			match_int(args, &arg);
			rd_dev->rd_page_count = arg;
			pr_debug("RAMDISK: Referencing Page"
				" Count: %u\n", rd_dev->rd_page_count);
			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
			break;
		case Opt_rd_nullio:
			match_int(args, &arg);
			if (arg != 1)
				break;

			pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
			rd_dev->rd_flags |= RDF_NULLIO;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}

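/*	rd_show_configfs_dev_params():
 *
 *	Report the ramdisk ID, page count, table count and nullio flag
 *	through configfs.
 */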
static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: rd_mcp\n",
			rd_dev->rd_dev_id);
	bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
			"  SG_table_count: %u  nullio: %d\n", rd_dev->rd_page_count,
			PAGE_SIZE, rd_dev->sg_table_count,
			!!(rd_dev->rd_flags & RDF_NULLIO));
	return bl;
}

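/*	rd_get_blocks():
 *
 *	Report the last addressable LBA, derived from the configured page
 *	count and the device block size.
 */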
static sector_t rd_get_blocks(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
			dev->dev_attrib.block_size) - 1;

	return blocks_long;
}

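/*	rd_init_prot():
 *
 *	Allocate protection space when a DIF protection type has been
 *	configured for the device.
 */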
static int rd_init_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	if (!dev->dev_attrib.pi_prot_type)
		return 0;

	return rd_build_prot_space(rd_dev, dev->prot_length);
}

static void rd_free_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_prot_space(rd_dev);
}

static struct sbc_ops rd_sbc_ops = {
	.execute_rw		= rd_execute_rw,
};

static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &rd_sbc_ops);
}

static struct se_subsystem_api rd_mcp_template = {
	.name			= "rd_mcp",
	.inquiry_prod		= "RAMDISK-MCP",
	.inquiry_rev		= RD_MCP_VERSION,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.alloc_device		= rd_alloc_device,
	.configure_device	= rd_configure_device,
	.free_device		= rd_free_device,
	.parse_cdb		= rd_parse_cdb,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= rd_get_blocks,
	.init_prot		= rd_init_prot,
	.free_prot		= rd_free_prot,
};

int __init rd_module_init(void)
{
	int ret;

	ret = transport_subsystem_register(&rd_mcp_template);
	if (ret < 0) {
		return ret;
	}

	return 0;
}

void rd_module_exit(void)
{
	transport_subsystem_release(&rd_mcp_template);
}