/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine  <-> Linux BlockIO transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128

static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
	return container_of(dev, struct iblock_dev, dev);
}

static struct se_subsystem_api iblock_template;

/*	iblock_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return &ib_dev->dev;
}

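/*
 * Claim the block device named by udev_path=, create the per-device bioset,
 * and propagate the queue limits (logical block size, queue depth, discard
 * granularity) into the se_device attributes.
 */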
static int iblock_configure_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct request_queue *q;
	struct block_device *bd = NULL;
	fmode_t mode;
	int ret = -ENOMEM;

	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
	if (!ib_dev->ibd_bio_set) {
		pr_err("IBLOCK: Unable to create bioset\n");
		goto out;
	}

	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	mode = FMODE_READ|FMODE_EXCL;
	if (!ib_dev->ibd_readonly)
		mode |= FMODE_WRITE;

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto out_free_bioset;
	}
	ib_dev->ibd_bd = bd;

	q = bdev_get_queue(bd);

	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
	dev->dev_attrib.hw_max_sectors = UINT_MAX;
	dev->dev_attrib.hw_queue_depth = q->nr_requests;

	/*
	 * Check if the underlying struct block_device request_queue supports
	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI and TRIM
	 * in ATA, in which case we need to report TPE=1 to the initiator.
	 */
	if (blk_queue_discard(q)) {
		dev->dev_attrib.max_unmap_lba_count =
				q->limits.max_discard_sectors;

		/*
		 * Currently hardcoded to 1 in Linux/SCSI code.
		 */
		dev->dev_attrib.max_unmap_block_desc_count = 1;
		dev->dev_attrib.unmap_granularity =
				q->limits.discard_granularity >> 9;
		dev->dev_attrib.unmap_granularity_alignment =
				q->limits.discard_alignment;

		pr_debug("IBLOCK: BLOCK Discard support available,"
				" disabled by default\n");
	}
	/*
	 * Enable write same emulation for IBLOCK and use 0xFFFF as
	 * the smaller WRITE_SAME(10) only has a two-byte block count.
	 */
	dev->dev_attrib.max_write_same_len = 0xFFFF;

	if (blk_queue_nonrot(q))
		dev->dev_attrib.is_nonrot = 1;

	return 0;

out_free_bioset:
	bioset_free(ib_dev->ibd_bio_set);
	ib_dev->ibd_bio_set = NULL;
out:
	return ret;
}

static void iblock_free_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	if (ib_dev->ibd_bio_set != NULL)
		bioset_free(ib_dev->ibd_bio_set);
	kfree(ib_dev);
}

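/*
 * Return the last addressable LBA in units of the block size exported to
 * the initiator, rescaling when it differs from the backing device's
 * logical block size (only 512/1024/2048/4096 conversions are handled).
 */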
static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
					bdev_logical_block_size(bd)) - 1);
	u32 block_size = bdev_logical_block_size(bd);

	if (block_size == dev->dev_attrib.block_size)
		return blocks_long;

	switch (block_size) {
	case 4096:
		switch (dev->dev_attrib.block_size) {
		case 2048:
			blocks_long <<= 1;
			break;
		case 1024:
			blocks_long <<= 2;
			break;
		case 512:
			blocks_long <<= 3;
		default:
			break;
		}
		break;
	case 2048:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 1;
			break;
		case 1024:
			blocks_long <<= 1;
			break;
		case 512:
			blocks_long <<= 2;
			break;
		default:
			break;
		}
		break;
	case 1024:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 2;
			break;
		case 2048:
			blocks_long >>= 1;
			break;
		case 512:
			blocks_long <<= 1;
			break;
		default:
			break;
		}
		break;
	case 512:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 3;
			break;
		case 2048:
			blocks_long >>= 2;
			break;
		case 1024:
			blocks_long >>= 1;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return blocks_long;
}

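/*
 * Drop one reference on the per-command iblock_req; the last caller
 * (submission path or final bio completion) reports GOOD or CHECK CONDITION
 * back to the target core and frees the iblock_req.
 */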
static void iblock_complete_cmd(struct se_cmd *cmd)
{
	struct iblock_req *ibr = cmd->priv;
	u8 status;

	if (!atomic_dec_and_test(&ibr->pending))
		return;

	if (atomic_read(&ibr->ib_bio_err_cnt))
		status = SAM_STAT_CHECK_CONDITION;
	else
		status = SAM_STAT_GOOD;

	target_complete_cmd(cmd, status);
	kfree(ibr);
}

static void iblock_bio_done(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;

	/*
	 * Set -EIO if !BIO_UPTODATE and the passed-in err is still 0
	 */
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
		err = -EIO;

	if (err != 0) {
		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
			" err: %d\n", bio, err);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic_inc();
	}

	bio_put(bio);

	iblock_complete_cmd(cmd);
}

static struct bio *
iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	struct bio *bio;

	/*
	 * Only allocate as many vector entries as the bio code allows us to,
	 * we'll loop later on until we have handled the whole request.
	 */
	if (sg_num > BIO_MAX_PAGES)
		sg_num = BIO_MAX_PAGES;

	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	bio->bi_bdev = ib_dev->ibd_bd;
	bio->bi_private = cmd;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_sector = lba;

	return bio;
}

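/*
 * Submit all queued bios under a single blk plug so the block layer can
 * merge adjacent requests before they reach the driver.
 */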
static void iblock_submit_bios(struct bio_list *list, int rw)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(rw, bio);
	blk_finish_plug(&plug);
}

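/*
 * Completion handler for the flush bio issued by SYNCHRONIZE CACHE;
 * bio->bi_private carries the se_cmd only when the Immediate bit was not
 * set and a response is still owed to the initiator.
 */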
static void iblock_end_io_flush(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;

	if (err)
		pr_err("IBLOCK: cache flush failed: %d\n", err);

	if (cmd) {
		if (err)
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		else
			target_complete_cmd(cmd, SAM_STAT_GOOD);
	}

	bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = iblock_end_io_flush;
	bio->bi_bdev = ib_dev->ibd_bd;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(WRITE_FLUSH, bio);
	return 0;
}

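/*
 * Issue a block layer discard for one LBA range; used by the UNMAP
 * emulation via sbc_execute_unmap() and directly by WRITE SAME with the
 * UNMAP bit set.
 */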
static sense_reason_t
iblock_do_unmap(struct se_cmd *cmd, void *priv,
		sector_t lba, sector_t nolb)
{
	struct block_device *bdev = priv;
	int ret;

	ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0);
	if (ret < 0) {
		pr_err("blkdev_issue_discard() failed: %d\n", ret);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	return 0;
}

static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;

	return sbc_execute_unmap(cmd, iblock_do_unmap, bdev);
}

static sense_reason_t
iblock_execute_write_same_unmap(struct se_cmd *cmd)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	sector_t lba = cmd->t_task_lba;
	sector_t nolb = sbc_get_write_same_sectors(cmd);
	int ret;

	ret = iblock_do_unmap(cmd, bdev, lba, nolb);
	if (ret)
		return ret;

	target_complete_cmd(cmd, GOOD);
	return 0;
}

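/*
 * Emulate WRITE SAME without the UNMAP bit by repeatedly mapping the
 * single-block payload into bios that cover the requested LBA range.
 */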
static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
	struct iblock_req *ibr;
	struct scatterlist *sg;
	struct bio *bio;
	struct bio_list list;
	sector_t block_lba = cmd->t_task_lba;
	sector_t sectors = sbc_get_write_same_sectors(cmd);

	sg = &cmd->t_data_sg[0];

	if (cmd->t_data_nents > 1 ||
	    sg->length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n", cmd->t_data_nents, sg->length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, 1);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	atomic_set(&ibr->pending, 1);

	while (sectors) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {

			bio = iblock_get_bio(cmd, block_lba, 1);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sectors -= 1;
	}

	iblock_submit_bios(&list, WRITE);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

enum {
	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_readonly, "readonly=%d"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};

static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_readonly;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
				SE_UDEV_PATH_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_readonly:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, &tmp_readonly);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoul() failed for"
						" readonly=\n");
				goto out;
			}
			ib_dev->ibd_readonly = tmp_readonly;
			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
		bl += sprintf(b + bl, "  UDEV PATH: %s",
				ib_dev->ibd_udev_path);
	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);

	bl += sprintf(b + bl, "        ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
			"" : (bd->bd_holder == ib_dev) ?
			"CLAIMED: IBLOCK" : "CLAIMED: OS");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}

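/*
 * Main READ/WRITE path: convert the command's LBA to 512-byte block layer
 * sectors, map the scatterlist onto one or more bios, and submit them in
 * batches of up to IBLOCK_MAX_BIO_PER_TASK.
 */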
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
		  enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	struct iblock_req *ibr;
	struct bio *bio;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
	sector_t block_lba;
	unsigned bio_cnt;
	int rw = 0;
	int i;

	if (data_direction == DMA_TO_DEVICE) {
		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
		struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
		/*
		 * Force writethrough using WRITE_FUA if a volatile write cache
		 * is not enabled, or if the initiator set the Force Unit Access bit.
		 */
		if (q->flush_flags & REQ_FUA) {
			if (cmd->se_cmd_flags & SCF_FUA)
				rw = WRITE_FUA;
			else if (!(q->flush_flags & REQ_FLUSH))
				rw = WRITE_FUA;
			else
				rw = WRITE;
		} else {
			rw = WRITE;
		}
	} else {
		rw = READ;
	}

	/*
	 * Convert the blocksize advertised to the initiator to the 512 byte
	 * units unconditionally used by the Linux block layer.
	 */
	if (dev->dev_attrib.block_size == 4096)
		block_lba = (cmd->t_task_lba << 3);
	else if (dev->dev_attrib.block_size == 2048)
		block_lba = (cmd->t_task_lba << 2);
	else if (dev->dev_attrib.block_size == 1024)
		block_lba = (cmd->t_task_lba << 1);
	else if (dev->dev_attrib.block_size == 512)
		block_lba = cmd->t_task_lba;
	else {
		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
				" %u\n", dev->dev_attrib.block_size);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	if (!sgl_nents) {
		atomic_set(&ibr->pending, 1);
		iblock_complete_cmd(cmd);
		return 0;
	}

	bio = iblock_get_bio(cmd, block_lba, sgl_nents);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	atomic_set(&ibr->pending, 2);
	bio_cnt = 1;

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 *	length of the S/G list entry this will cause an
		 *	endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list, rw);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
	}

	iblock_submit_bios(&list, rw);
	iblock_complete_cmd(cmd);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	int ret;

	ret = bdev_alignment_offset(bd);
	if (ret == -1)
		return 0;

	/* convert offset-bytes to offset-lbas */
	return ret / bdev_logical_block_size(bd);
}

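/*
 * Logical blocks per physical block exponent (LBPPBE), derived from the
 * backing device's physical vs. logical block size.
 */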
static unsigned int iblock_get_lbppbe(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);

	return ilog2(logs_per_phys);
}

static unsigned int iblock_get_io_min(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_min(bd);
}

static unsigned int iblock_get_io_opt(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_opt(bd);
}

static struct sbc_ops iblock_sbc_ops = {
	.execute_rw		= iblock_execute_rw,
	.execute_sync_cache	= iblock_execute_sync_cache,
	.execute_write_same	= iblock_execute_write_same,
	.execute_write_same_unmap = iblock_execute_write_same_unmap,
	.execute_unmap		= iblock_execute_unmap,
};

static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}

static bool iblock_get_write_cache(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return q->flush_flags & REQ_FLUSH;
}

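/*
 * Report whether the backing request_queue advertises a volatile write
 * cache (REQ_FLUSH in flush_flags).
 */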
static struct se_subsystem_api iblock_template = {
	.name			= "iblock",
	.inquiry_prod		= "IBLOCK",
	.inquiry_rev		= IBLOCK_VERSION,
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.alloc_device		= iblock_alloc_device,
	.configure_device	= iblock_configure_device,
	.free_device		= iblock_free_device,
	.parse_cdb		= iblock_parse_cdb,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= iblock_get_blocks,
	.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
	.get_lbppbe		= iblock_get_lbppbe,
	.get_io_min		= iblock_get_io_min,
	.get_io_opt		= iblock_get_io_opt,
	.get_write_cache	= iblock_get_write_cache,
};

static int __init iblock_module_init(void)
{
	return transport_subsystem_register(&iblock_template);
}

static void __exit iblock_module_exit(void)
{
	transport_subsystem_release(&iblock_template);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);