/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine  <-> Linux BlockIO transport
 * specific functions.
 *
 * (c) Copyright 2003-2012 RisingTide Systems LLC.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128

static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
	return container_of(dev, struct iblock_dev, dev);
}


static struct se_subsystem_api iblock_template;

/*	iblock_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return &ib_dev->dev;
}

static int iblock_configure_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct request_queue *q;
	struct block_device *bd = NULL;
	fmode_t mode;
	int ret = -ENOMEM;

	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
	if (!ib_dev->ibd_bio_set) {
		pr_err("IBLOCK: Unable to create bioset\n");
		goto out;
	}

	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	mode = FMODE_READ|FMODE_EXCL;
	if (!ib_dev->ibd_readonly)
		mode |= FMODE_WRITE;

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto out_free_bioset;
	}
	ib_dev->ibd_bd = bd;

	q = bdev_get_queue(bd);

	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
	dev->dev_attrib.hw_max_sectors = UINT_MAX;
	dev->dev_attrib.hw_queue_depth = q->nr_requests;

	/*
	 * Check if the underlying struct block_device request_queue supports
	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
	 * in ATA and we need to set TPE=1
	 */
	if (blk_queue_discard(q)) {
		dev->dev_attrib.max_unmap_lba_count =
				q->limits.max_discard_sectors;

		/*
		 * Currently hardcoded to 1 in Linux/SCSI code..
		 */
		dev->dev_attrib.max_unmap_block_desc_count = 1;
		dev->dev_attrib.unmap_granularity =
				q->limits.discard_granularity >> 9;
		dev->dev_attrib.unmap_granularity_alignment =
				q->limits.discard_alignment;

		pr_debug("IBLOCK: BLOCK Discard support available,"
				" disabled by default\n");
	}
	/*
	 * Enable write same emulation for IBLOCK and use 0xFFFF as
	 * the smaller WRITE_SAME(10) only has a two-byte block count.
	 */
	dev->dev_attrib.max_write_same_len = 0xFFFF;

	if (blk_queue_nonrot(q))
		dev->dev_attrib.is_nonrot = 1;
	return 0;

out_free_bioset:
	bioset_free(ib_dev->ibd_bio_set);
	ib_dev->ibd_bio_set = NULL;
out:
	return ret;
}

static void iblock_free_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	if (ib_dev->ibd_bio_set != NULL)
		bioset_free(ib_dev->ibd_bio_set);
	kfree(ib_dev);
}

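/*
 * Translate the size of the backing block device into a READ CAPACITY
 * style result: the last addressable LBA, expressed in units of the
 * block size exported to the initiator rather than the device's own
 * logical block size.
 */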
static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
					bdev_logical_block_size(bd)) - 1);
	u32 block_size = bdev_logical_block_size(bd);

	if (block_size == dev->dev_attrib.block_size)
		return blocks_long;

	switch (block_size) {
	case 4096:
		switch (dev->dev_attrib.block_size) {
		case 2048:
			blocks_long <<= 1;
			break;
		case 1024:
			blocks_long <<= 2;
			break;
		case 512:
			blocks_long <<= 3;
		default:
			break;
		}
		break;
	case 2048:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 1;
			break;
		case 1024:
			blocks_long <<= 1;
			break;
		case 512:
			blocks_long <<= 2;
			break;
		default:
			break;
		}
		break;
	case 1024:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 2;
			break;
		case 2048:
			blocks_long >>= 1;
			break;
		case 512:
			blocks_long <<= 1;
			break;
		default:
			break;
		}
		break;
	case 512:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 3;
			break;
		case 2048:
			blocks_long >>= 2;
			break;
		case 1024:
			blocks_long >>= 1;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return blocks_long;
}

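/*
 * Drop one per-bio reference on the command; when the last outstanding
 * bio has completed, report GOOD or CHECK CONDITION to the target core
 * and free the iblock_req.
 */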
static void iblock_complete_cmd(struct se_cmd *cmd)
{
	struct iblock_req *ibr = cmd->priv;
	u8 status;

	if (!atomic_dec_and_test(&ibr->pending))
		return;

	if (atomic_read(&ibr->ib_bio_err_cnt))
		status = SAM_STAT_CHECK_CONDITION;
	else
		status = SAM_STAT_GOOD;

	target_complete_cmd(cmd, status);
	kfree(ibr);
}

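/* Completion callback for the data bios allocated by iblock_get_bio(). */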
static void iblock_bio_done(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;

	/*
	 * Set -EIO if !BIO_UPTODATE and the passed err is still 0
	 */
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
		err = -EIO;

	if (err != 0) {
		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
			" err: %d\n", bio, err);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic_inc();
	}

	bio_put(bio);

	iblock_complete_cmd(cmd);
}

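/*
 * Allocate a bio from the per-device bioset, aimed at the claimed block
 * device and starting at @lba (in 512-byte sectors), with room for up to
 * @sg_num segments.
 */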
static struct bio *
iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	struct bio *bio;

	/*
	 * Only allocate as many vector entries as the bio code allows us to,
	 * we'll loop later on until we have handled the whole request.
	 */
	if (sg_num > BIO_MAX_PAGES)
		sg_num = BIO_MAX_PAGES;

	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	bio->bi_bdev = ib_dev->ibd_bd;
	bio->bi_private = cmd;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_sector = lba;

	return bio;
}

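/*
 * Submit all queued bios inside a single blk_plug section so the block
 * layer has a chance to merge and batch them.
 */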
static void iblock_submit_bios(struct bio_list *list, int rw)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(rw, bio);
	blk_finish_plug(&plug);
}

static void iblock_end_io_flush(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;

	if (err)
		pr_err("IBLOCK: cache flush failed: %d\n", err);

	if (cmd) {
		if (err)
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		else
			target_complete_cmd(cmd, SAM_STAT_GOOD);
	}

	bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = iblock_end_io_flush;
	bio->bi_bdev = ib_dev->ibd_bd;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(WRITE_FLUSH, bio);
	return 0;
}

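/*
 * UNMAP: walk the parameter list block descriptors, validate each LBA
 * range against the advertised limits and issue a discard for it.
 */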
static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl, err;

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_INVALID_PARAMETER_LIST;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
				 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		err = blkdev_issue_discard(ib_dev->ibd_bd, lba, range,
					   GFP_KERNEL, 0);
		if (err < 0) {
			pr_err("blkdev_issue_discard() failed: %d\n",
					err);
			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
			goto err;
		}

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}

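/* WRITE_SAME with the UNMAP bit set: map the whole range onto a single discard. */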
static sense_reason_t
iblock_execute_write_same_unmap(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	int rc;

	rc = blkdev_issue_discard(ib_dev->ibd_bd, cmd->t_task_lba,
			spc_get_write_same_sectors(cmd), GFP_KERNEL, 0);
	if (rc < 0) {
		pr_warn("blkdev_issue_discard() failed: %d\n", rc);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

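/*
 * WRITE_SAME without UNMAP: replicate the single-block payload across the
 * requested range by adding the same page to successive bios.
 */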
static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
	struct iblock_req *ibr;
	struct scatterlist *sg;
	struct bio *bio;
	struct bio_list list;
	sector_t block_lba = cmd->t_task_lba;
	sector_t sectors = spc_get_write_same_sectors(cmd);

	sg = &cmd->t_data_sg[0];

	if (cmd->t_data_nents > 1 ||
	    sg->length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n", cmd->t_data_nents, sg->length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, 1);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	atomic_set(&ibr->pending, 1);

	while (sectors) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {

			bio = iblock_get_bio(cmd, block_lba, 1);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sectors -= 1;
	}

	iblock_submit_bios(&list, WRITE);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

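/* configfs device attribute tokens parsed by iblock_set_configfs_dev_params() */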
enum {
	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_readonly, "readonly=%d"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};

static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_readonly;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
				SE_UDEV_PATH_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_readonly:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = strict_strtoul(arg_p, 0, &tmp_readonly);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("strict_strtoul() failed for"
						" readonly=\n");
				goto out;
			}
			ib_dev->ibd_readonly = tmp_readonly;
			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
		bl += sprintf(b + bl, "  UDEV PATH: %s",
				ib_dev->ibd_udev_path);
	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);

	bl += sprintf(b + bl, "        ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
			"" : (bd->bd_holder == ib_dev) ?
			"CLAIMED: IBLOCK" : "CLAIMED: OS");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}

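/*
 * READ/WRITE data path: convert the command LBA to 512-byte sectors, map
 * the scatter-gather list onto bios and submit them in batches of up to
 * IBLOCK_MAX_BIO_PER_TASK.
 */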
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd)
{
	struct scatterlist *sgl = cmd->t_data_sg;
	u32 sgl_nents = cmd->t_data_nents;
	enum dma_data_direction data_direction = cmd->data_direction;
	struct se_device *dev = cmd->se_dev;
	struct iblock_req *ibr;
	struct bio *bio;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
	sector_t block_lba;
	unsigned bio_cnt;
	int rw;
	int i;

	if (data_direction == DMA_TO_DEVICE) {
		/*
		 * Force data to disk if we pretend to not have a volatile
		 * write cache, or the initiator set the Force Unit Access bit.
		 */
		if (dev->dev_attrib.emulate_write_cache == 0 ||
		    (dev->dev_attrib.emulate_fua_write > 0 &&
		     (cmd->se_cmd_flags & SCF_FUA)))
			rw = WRITE_FUA;
		else
			rw = WRITE;
	} else {
		rw = READ;
	}

	/*
	 * Convert the blocksize advertised to the initiator to the 512 byte
	 * units unconditionally used by the Linux block layer.
	 */
	if (dev->dev_attrib.block_size == 4096)
		block_lba = (cmd->t_task_lba << 3);
	else if (dev->dev_attrib.block_size == 2048)
		block_lba = (cmd->t_task_lba << 2);
	else if (dev->dev_attrib.block_size == 1024)
		block_lba = (cmd->t_task_lba << 1);
	else if (dev->dev_attrib.block_size == 512)
		block_lba = cmd->t_task_lba;
	else {
		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
				" %u\n", dev->dev_attrib.block_size);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	if (!sgl_nents) {
		atomic_set(&ibr->pending, 1);
		iblock_complete_cmd(cmd);
		return 0;
	}

	bio = iblock_get_bio(cmd, block_lba, sgl_nents);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	atomic_set(&ibr->pending, 2);
	bio_cnt = 1;

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 *	length of the S/G list entry this will cause an
		 *	endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list, rw);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
	}

	iblock_submit_bios(&list, rw);
	iblock_complete_cmd(cmd);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

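/* Report the last addressable LBA, scaled to the exported block size. */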
static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static struct sbc_ops iblock_sbc_ops = {
	.execute_rw		= iblock_execute_rw,
	.execute_sync_cache	= iblock_execute_sync_cache,
	.execute_write_same	= iblock_execute_write_same,
	.execute_write_same_unmap = iblock_execute_write_same_unmap,
	.execute_unmap		= iblock_execute_unmap,
};

static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}

static struct se_subsystem_api iblock_template = {
	.name			= "iblock",
	.inquiry_prod		= "IBLOCK",
	.inquiry_rev		= IBLOCK_VERSION,
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.alloc_device		= iblock_alloc_device,
	.configure_device	= iblock_configure_device,
	.free_device		= iblock_free_device,
	.parse_cdb		= iblock_parse_cdb,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= iblock_get_blocks,
};

static int __init iblock_module_init(void)
{
	return transport_subsystem_register(&iblock_template);
}

static void iblock_module_exit(void)
{
	transport_subsystem_release(&iblock_template);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);