/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine  <-> Linux BlockIO transport
 * specific functions.
 *
 * (c) Copyright 2003-2012 RisingTide Systems LLC.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128

static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
	return container_of(dev, struct iblock_dev, dev);
}


static struct se_subsystem_api iblock_template;

static void iblock_bio_done(struct bio *, int);

/*	iblock_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return &ib_dev->dev;
}

static int iblock_configure_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct request_queue *q;
	struct block_device *bd = NULL;
	fmode_t mode;
	int ret = -ENOMEM;

	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
	if (!ib_dev->ibd_bio_set) {
		pr_err("IBLOCK: Unable to create bioset\n");
		goto out;
	}

	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	mode = FMODE_READ|FMODE_EXCL;
	if (!ib_dev->ibd_readonly)
		mode |= FMODE_WRITE;

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto out_free_bioset;
	}
	ib_dev->ibd_bd = bd;

	q = bdev_get_queue(bd);

	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
	dev->dev_attrib.hw_max_sectors = UINT_MAX;
	dev->dev_attrib.hw_queue_depth = q->nr_requests;

	/*
	 * Check if the underlying struct block_device request_queue supports
	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
	 * in ATA and we need to set TPE=1
	 */
	if (blk_queue_discard(q)) {
		dev->dev_attrib.max_unmap_lba_count =
				q->limits.max_discard_sectors;

		/*
		 * Currently hardcoded to 1 in Linux/SCSI code..
		 */
		dev->dev_attrib.max_unmap_block_desc_count = 1;
		dev->dev_attrib.unmap_granularity =
				q->limits.discard_granularity >> 9;
		dev->dev_attrib.unmap_granularity_alignment =
				q->limits.discard_alignment;

		pr_debug("IBLOCK: BLOCK Discard support available,"
				" disabled by default\n");
	}
	/*
	 * Enable write same emulation for IBLOCK and use 0xFFFF as
	 * the smaller WRITE_SAME(10) only has a two-byte block count.
	 */
	dev->dev_attrib.max_write_same_len = 0xFFFF;

	if (blk_queue_nonrot(q))
		dev->dev_attrib.is_nonrot = 1;
	return 0;

out_free_bioset:
	bioset_free(ib_dev->ibd_bio_set);
	ib_dev->ibd_bio_set = NULL;
out:
	return ret;
}

static void iblock_free_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	if (ib_dev->ibd_bio_set != NULL)
		bioset_free(ib_dev->ibd_bio_set);
	kfree(ib_dev);
}

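/*
 * Derive the last addressable LBA from the backing inode size in units of
 * the bdev logical block size, then rescale it whenever the block_size
 * exported to the initiator differs from the backing device's logical
 * block size.
 */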
static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
					bdev_logical_block_size(bd)) - 1);
	u32 block_size = bdev_logical_block_size(bd);

	if (block_size == dev->dev_attrib.block_size)
		return blocks_long;

	switch (block_size) {
	case 4096:
		switch (dev->dev_attrib.block_size) {
		case 2048:
			blocks_long <<= 1;
			break;
		case 1024:
			blocks_long <<= 2;
			break;
		case 512:
			blocks_long <<= 3;
		default:
			break;
		}
		break;
	case 2048:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 1;
			break;
		case 1024:
			blocks_long <<= 1;
			break;
		case 512:
			blocks_long <<= 2;
			break;
		default:
			break;
		}
		break;
	case 1024:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 2;
			break;
		case 2048:
			blocks_long >>= 1;
			break;
		case 512:
			blocks_long <<= 1;
			break;
		default:
			break;
		}
		break;
	case 512:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 3;
			break;
		case 2048:
			blocks_long >>= 2;
			break;
		case 1024:
			blocks_long >>= 1;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return blocks_long;
}

static void iblock_end_io_flush(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;

	if (err)
		pr_err("IBLOCK: cache flush failed: %d\n", err);

	if (cmd) {
		if (err)
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		else
			target_complete_cmd(cmd, SAM_STAT_GOOD);
	}

	bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

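	/*
	 * An empty bio submitted with WRITE_FLUSH asks the block layer to
	 * flush the device's volatile write cache; no data pages are needed.
	 */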
	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = iblock_end_io_flush;
	bio->bi_bdev = ib_dev->ibd_bd;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(WRITE_FLUSH, bio);
	return 0;
}

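/*
 * UNMAP parameter list layout, as parsed below: bytes 0-1 hold the data
 * length (dl), bytes 2-3 the block descriptor data length (bd_dl), and
 * 16-byte block descriptors start at offset 8, each carrying an 8-byte LBA
 * and a 4-byte block count.
 */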
static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl, err;

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_INVALID_PARAMETER_LIST;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
				 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		err = blkdev_issue_discard(ib_dev->ibd_bd, lba, range,
					   GFP_KERNEL, 0);
		if (err < 0) {
			pr_err("blkdev_issue_discard() failed: %d\n",
					err);
			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
			goto err;
		}

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}

static struct bio *iblock_get_bio(struct se_cmd *, sector_t, u32);
static void iblock_submit_bios(struct bio_list *, int);
static void iblock_complete_cmd(struct se_cmd *);

static sense_reason_t
iblock_execute_write_same_unmap(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	int rc;

	rc = blkdev_issue_discard(ib_dev->ibd_bd, cmd->t_task_lba,
			spc_get_write_same_sectors(cmd), GFP_KERNEL, 0);
	if (rc < 0) {
		pr_warn("blkdev_issue_discard() failed: %d\n", rc);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

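/*
 * WRITE_SAME emulation: the payload is a single logical block, so the same
 * page is added to bios once per logical block until 'sectors' blocks are
 * covered, chaining a fresh bio whenever the current one is full.
 */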
static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
	struct iblock_req *ibr;
	struct scatterlist *sg;
	struct bio *bio;
	struct bio_list list;
	sector_t block_lba = cmd->t_task_lba;
	unsigned int sectors = spc_get_write_same_sectors(cmd);

	sg = &cmd->t_data_sg[0];

	if (cmd->t_data_nents > 1 ||
	    sg->length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n", cmd->t_data_nents, sg->length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, 1);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	atomic_set(&ibr->pending, 1);

	while (sectors) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {

			bio = iblock_get_bio(cmd, block_lba, 1);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sectors -= 1;
	}

	iblock_submit_bios(&list, WRITE);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

enum {
	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_readonly, "readonly=%d"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};

static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_readonly;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
				SE_UDEV_PATH_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_readonly:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = strict_strtoul(arg_p, 0, &tmp_readonly);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("strict_strtoul() failed for"
						" readonly=\n");
				goto out;
			}
			ib_dev->ibd_readonly = tmp_readonly;
			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
		bl += sprintf(b + bl, "  UDEV PATH: %s",
				ib_dev->ibd_udev_path);
	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);

	bl += sprintf(b + bl, "        ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
			"" : (bd->bd_holder == ib_dev) ?
			"CLAIMED: IBLOCK" : "CLAIMED: OS");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}

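/*
 * ibr->pending is a per-command reference count: the submission path takes
 * one reference per bio plus one for itself, and the command is only
 * completed here once the last reference is dropped.
 */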
static void iblock_complete_cmd(struct se_cmd *cmd)
{
	struct iblock_req *ibr = cmd->priv;
	u8 status;

	if (!atomic_dec_and_test(&ibr->pending))
		return;

	if (atomic_read(&ibr->ib_bio_err_cnt))
		status = SAM_STAT_CHECK_CONDITION;
	else
		status = SAM_STAT_GOOD;

	target_complete_cmd(cmd, status);
	kfree(ibr);
}

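/*
 * Allocate a bio from the per-device bioset so submission can make forward
 * progress under memory pressure.  Note that bi_sector is always expressed
 * in 512-byte units, so callers convert the exported LBA first.
 */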
static struct bio *
iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	struct bio *bio;

	/*
	 * Only allocate as many vector entries as the bio code allows us to,
	 * we'll loop later on until we have handled the whole request.
	 */
	if (sg_num > BIO_MAX_PAGES)
		sg_num = BIO_MAX_PAGES;

	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	bio->bi_bdev = ib_dev->ibd_bd;
	bio->bi_private = cmd;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_sector = lba;
	return bio;
}

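/*
 * Submit all queued bios inside a single blk plug so the block layer can
 * batch and merge them before they hit the request queue.
 */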
static void iblock_submit_bios(struct bio_list *list, int rw)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(rw, bio);
	blk_finish_plug(&plug);
}

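/*
 * Main READ/WRITE path: map the command's scatterlist into one or more bios,
 * submit them in batches of at most IBLOCK_MAX_BIO_PER_TASK, and complete
 * the command through iblock_complete_cmd() once every bio has finished.
 */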
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd)
{
	struct scatterlist *sgl = cmd->t_data_sg;
	u32 sgl_nents = cmd->t_data_nents;
	enum dma_data_direction data_direction = cmd->data_direction;
	struct se_device *dev = cmd->se_dev;
	struct iblock_req *ibr;
	struct bio *bio;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
	sector_t block_lba;
	unsigned bio_cnt;
	int rw;
	int i;

	if (data_direction == DMA_TO_DEVICE) {
		/*
		 * Force data to disk if we pretend to not have a volatile
		 * write cache, or the initiator set the Force Unit Access bit.
		 */
		if (dev->dev_attrib.emulate_write_cache == 0 ||
		    (dev->dev_attrib.emulate_fua_write > 0 &&
		     (cmd->se_cmd_flags & SCF_FUA)))
			rw = WRITE_FUA;
		else
			rw = WRITE;
	} else {
		rw = READ;
	}

	/*
	 * Convert the blocksize advertised to the initiator to the 512 byte
	 * units unconditionally used by the Linux block layer.
	 */
	if (dev->dev_attrib.block_size == 4096)
		block_lba = (cmd->t_task_lba << 3);
	else if (dev->dev_attrib.block_size == 2048)
		block_lba = (cmd->t_task_lba << 2);
	else if (dev->dev_attrib.block_size == 1024)
		block_lba = (cmd->t_task_lba << 1);
	else if (dev->dev_attrib.block_size == 512)
		block_lba = cmd->t_task_lba;
	else {
		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
				" %u\n", dev->dev_attrib.block_size);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	if (!sgl_nents) {
		atomic_set(&ibr->pending, 1);
		iblock_complete_cmd(cmd);
		return 0;
	}

	bio = iblock_get_bio(cmd, block_lba, sgl_nents);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

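	/*
	 * Two references: one for the bio just added to the list, one held
	 * by this submission path and dropped by iblock_complete_cmd() below.
	 */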
	atomic_set(&ibr->pending, 2);
	bio_cnt = 1;

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 *	length of the S/G list entry this will cause an
		 *	endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list, rw);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
	}

	iblock_submit_bios(&list, rw);
	iblock_complete_cmd(cmd);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static void iblock_bio_done(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;

	/*
	 * Set -EIO if !BIO_UPTODATE and the passed-in err is still 0
	 */
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
		err = -EIO;

	if (err != 0) {
		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
			" err: %d\n", bio, err);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic_inc();
	}

	bio_put(bio);

	iblock_complete_cmd(cmd);
}

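/*
 * SBC execution callbacks handed to sbc_parse_cdb() when decoding incoming
 * CDBs for this backend.
 */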
static struct sbc_ops iblock_sbc_ops = {
	.execute_rw		= iblock_execute_rw,
	.execute_sync_cache	= iblock_execute_sync_cache,
	.execute_write_same	= iblock_execute_write_same,
	.execute_write_same_unmap = iblock_execute_write_same_unmap,
	.execute_unmap		= iblock_execute_unmap,
};

static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}

static struct se_subsystem_api iblock_template = {
	.name			= "iblock",
	.inquiry_prod		= "IBLOCK",
	.inquiry_rev		= IBLOCK_VERSION,
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.alloc_device		= iblock_alloc_device,
	.configure_device	= iblock_configure_device,
	.free_device		= iblock_free_device,
	.parse_cdb		= iblock_parse_cdb,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= iblock_get_blocks,
};

static int __init iblock_module_init(void)
{
	return transport_subsystem_register(&iblock_template);
}

static void iblock_module_exit(void)
{
	transport_subsystem_release(&iblock_template);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);