/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine  <-> Linux BlockIO transport
 * specific functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128

static struct se_subsystem_api iblock_template;

static void iblock_bio_done(struct bio *, int);

/*	iblock_attach_hba(): (Part of se_subsystem_api_t template)
 */
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

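/*
 * Allocate the iblock_dev that backs a new configfs device node.  Its
 * udev_path and readonly attributes are filled in later through
 * iblock_set_configfs_dev_params() before the device is enabled.
 */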
static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return ib_dev;
}

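/*
 * Called on echo 1 > $HBA/$DEV/enable: claim the backing struct
 * block_device via blkdev_get_by_path(), copy its queue limits into the
 * se_dev_limits handed to the target core, and advertise discard support
 * when the underlying request_queue provides it.
 */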
static struct se_device *iblock_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	struct iblock_dev *ib_dev = p;
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct block_device *bd = NULL;
	struct request_queue *q;
	struct queue_limits *limits;
	u32 dev_flags = 0;
	fmode_t mode;
	int ret = -EINVAL;

	if (!ib_dev) {
		pr_err("Unable to locate struct iblock_dev parameter\n");
		return ERR_PTR(ret);
	}
	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
	if (!ib_dev->ibd_bio_set) {
		pr_err("IBLOCK: Unable to create bioset()\n");
		return ERR_PTR(-ENOMEM);
	}
	pr_debug("IBLOCK: Created bio_set()\n");
	/*
	 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
	 * has already been set before echo 1 > $HBA/$DEV/enable can run.
	 */
	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	mode = FMODE_READ|FMODE_EXCL;
	if (!ib_dev->ibd_readonly)
		mode |= FMODE_WRITE;

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto failed;
	}
	/*
	 * Setup the local scope queue_limits from struct request_queue->limits
	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
	 */
	q = bdev_get_queue(bd);
	limits = &dev_limits.limits;
	limits->logical_block_size = bdev_logical_block_size(bd);
	limits->max_hw_sectors = UINT_MAX;
	limits->max_sectors = UINT_MAX;
	dev_limits.hw_queue_depth = q->nr_requests;
	dev_limits.queue_depth = q->nr_requests;

	ib_dev->ibd_bd = bd;

	dev = transport_add_device_to_core_hba(hba,
			&iblock_template, se_dev, dev_flags, ib_dev,
			&dev_limits, "IBLOCK", IBLOCK_VERSION);
	if (!dev)
		goto failed;

	/*
	 * Check if the underlying struct block_device request_queue supports
	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
	 * in ATA, in which case we need to set TPE=1.
	 */
	if (blk_queue_discard(q)) {
		dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
				q->limits.max_discard_sectors;
		/*
		 * Currently hardcoded to 1 in Linux/SCSI code.
		 */
		dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
		dev->se_sub_dev->se_dev_attrib.unmap_granularity =
				q->limits.discard_granularity >> 9;
		dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
				q->limits.discard_alignment;

		pr_debug("IBLOCK: BLOCK Discard support available,"
				" disabled by default\n");
	}

	if (blk_queue_nonrot(q))
		dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;

	return dev;

failed:
	if (ib_dev->ibd_bio_set) {
		bioset_free(ib_dev->ibd_bio_set);
		ib_dev->ibd_bio_set = NULL;
	}
	ib_dev->ibd_bd = NULL;
	return ERR_PTR(ret);
}

static void iblock_free_device(void *p)
{
	struct iblock_dev *ib_dev = p;

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	if (ib_dev->ibd_bio_set != NULL)
		bioset_free(ib_dev->ibd_bio_set);
	kfree(ib_dev);
}

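/*
 * Rescale the capacity of the backing block device from its native
 * logical block size to the block size advertised to the initiator.
 * The -1 in blocks_long follows READ CAPACITY semantics: report the
 * LBA of the last addressable block rather than the block count.
 */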
static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
					bdev_logical_block_size(bd)) - 1);
	u32 block_size = bdev_logical_block_size(bd);

	if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
		return blocks_long;

	switch (block_size) {
	case 4096:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 2048:
			blocks_long <<= 1;
			break;
		case 1024:
			blocks_long <<= 2;
			break;
		case 512:
			blocks_long <<= 3;
			break;
		default:
			break;
		}
		break;
	case 2048:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 1;
			break;
		case 1024:
			blocks_long <<= 1;
			break;
		case 512:
			blocks_long <<= 2;
			break;
		default:
			break;
		}
		break;
	case 1024:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 2;
			break;
		case 2048:
			blocks_long >>= 1;
			break;
		case 512:
			blocks_long <<= 1;
			break;
		default:
			break;
		}
		break;
	case 512:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 3;
			break;
		case 2048:
			blocks_long >>= 2;
			break;
		case 1024:
			blocks_long >>= 1;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return blocks_long;
}

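/*
 * Completion callback for the flush bio issued by
 * iblock_execute_sync_cache().  bio->bi_private carries the se_cmd only
 * when the initiator did not request immediate completion.
 */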
static void iblock_end_io_flush(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;

	if (err)
		pr_err("IBLOCK: cache flush failed: %d\n", err);

	if (cmd) {
		if (err) {
			cmd->scsi_sense_reason =
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		} else {
			target_complete_cmd(cmd, SAM_STAT_GOOD);
		}
	}

	bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static int iblock_execute_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = iblock_end_io_flush;
	bio->bi_bdev = ib_dev->ibd_bd;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(WRITE_FLUSH, bio);
	return 0;
}

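/*
 * Back UNMAP emulation with a block layer discard over the requested
 * range; blkdev_issue_discard() expects lba and range in 512 byte
 * sectors.
 */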
static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
{
	struct iblock_dev *ibd = dev->dev_ptr;
	struct block_device *bd = ibd->ibd_bd;
	int barrier = 0;

	return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
}

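/*
 * Map WRITE_SAME onto a discard of the same sector range.  Note this is
 * only a faithful WRITE_SAME when the payload is zeroes and discarded
 * blocks are guaranteed to read back as zeroes.
 */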
static int iblock_execute_write_same(struct se_cmd *cmd)
{
	struct iblock_dev *ibd = cmd->se_dev->dev_ptr;
	int ret;

	ret = blkdev_issue_discard(ibd->ibd_bd, cmd->t_task_lba,
				   spc_get_write_same_sectors(cmd), GFP_KERNEL,
				   0);
	if (ret < 0) {
		pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
		return ret;
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

enum {
	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_readonly, "readonly=%d"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};

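/*
 * Parse the comma separated attribute string written to the device's
 * configfs control file.  For example (hypothetical backing device):
 *
 *   echo "udev_path=/dev/sdb,readonly=0" > $HBA/$DEV/control
 *   echo 1 > $HBA/$DEV/enable
 */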
static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
					       struct se_subsystem_dev *se_dev,
					       const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_readonly;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
					"%s", arg_p);
			kfree(arg_p);
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_readonly:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = strict_strtoul(arg_p, 0, &tmp_readonly);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("strict_strtoul() failed for"
						" readonly=\n");
				goto out;
			}
			ib_dev->ibd_readonly = tmp_readonly;
			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t iblock_check_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev)
{
	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;

	if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	return 0;
}

static ssize_t iblock_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
	struct block_device *bd = ibd->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH)
		bl += sprintf(b + bl, "  UDEV PATH: %s",
				ibd->ibd_udev_path);
	bl += sprintf(b + bl, "  readonly: %d\n", ibd->ibd_readonly);

	bl += sprintf(b + bl, "        ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
			"" : (bd->bd_holder == ibd) ?
			"CLAIMED: IBLOCK" : "CLAIMED: OS");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}

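/*
 * Drop one reference on ibr->pending; the final dropper (the last
 * completing bio, or iblock_execute_rw() itself) completes the se_cmd,
 * reporting CHECK CONDITION if any bio failed along the way.
 */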
static void iblock_complete_cmd(struct se_cmd *cmd)
{
	struct iblock_req *ibr = cmd->priv;
	u8 status;

	if (!atomic_dec_and_test(&ibr->pending))
		return;

	if (atomic_read(&ibr->ib_bio_err_cnt))
		status = SAM_STAT_CHECK_CONDITION;
	else
		status = SAM_STAT_GOOD;

	target_complete_cmd(cmd, status);
	kfree(ibr);
}

static void iblock_bio_destructor(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;

	bio_free(bio, ib_dev->ibd_bio_set);
}

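/*
 * Allocate a bio from the per-device bioset, capped at BIO_MAX_PAGES
 * vector entries, and wire it to the claimed block device and our
 * completion callback.
 */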
static struct bio *
iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
{
	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
	struct bio *bio;

	/*
	 * Only allocate as many vector entries as the bio code allows us to;
	 * we'll loop later on until we have handled the whole request.
	 */
	if (sg_num > BIO_MAX_PAGES)
		sg_num = BIO_MAX_PAGES;

	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	bio->bi_bdev = ib_dev->ibd_bd;
	bio->bi_private = cmd;
	bio->bi_destructor = iblock_bio_destructor;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_sector = lba;
	return bio;
}

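/*
 * Submit a batch of bios inside a single blk_plug section so the block
 * layer has a chance to merge them before they reach the request queue.
 */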
static void iblock_submit_bios(struct bio_list *list, int rw)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(rw, bio);
	blk_finish_plug(&plug);
}

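/*
 * Main READ/WRITE path: map the command's scatterlist onto one or more
 * bios and submit them in batches of at most IBLOCK_MAX_BIO_PER_TASK.
 * ibr->pending starts at 2 so the command cannot be completed by bio
 * callbacks before all bios have been allocated and submitted.
 */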
static int iblock_execute_rw(struct se_cmd *cmd)
{
	struct scatterlist *sgl = cmd->t_data_sg;
	u32 sgl_nents = cmd->t_data_nents;
	enum dma_data_direction data_direction = cmd->data_direction;
	struct se_device *dev = cmd->se_dev;
	struct iblock_req *ibr;
	struct bio *bio;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
	sector_t block_lba;
	unsigned bio_cnt;
	int rw;
	int i;

	if (data_direction == DMA_TO_DEVICE) {
		/*
		 * Force data to disk if we pretend to not have a volatile
		 * write cache, or the initiator set the Force Unit Access bit.
		 */
		if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
		     (cmd->se_cmd_flags & SCF_FUA)))
			rw = WRITE_FUA;
		else
			rw = WRITE;
	} else {
		rw = READ;
	}

	/*
	 * Convert the blocksize advertised to the initiator to the 512 byte
	 * units unconditionally used by the Linux block layer, e.g. with a
	 * 4096 byte block size, LBA 10 becomes 512 byte sector 80.
	 */
	if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
		block_lba = (cmd->t_task_lba << 3);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
		block_lba = (cmd->t_task_lba << 2);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
		block_lba = (cmd->t_task_lba << 1);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
		block_lba = cmd->t_task_lba;
	else {
		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
				" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return -ENOSYS;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, sgl_nents);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	atomic_set(&ibr->pending, 2);
	bio_cnt = 1;

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 *	length of the S/G list entry this will cause an
		 *	endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list, rw);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
	}

	iblock_submit_bios(&list, rw);
	iblock_complete_cmd(cmd);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
fail:
	return -ENOMEM;
}

static u32 iblock_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

static u32 iblock_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ibd = dev->dev_ptr;
	struct block_device *bd = ibd->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

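/*
 * Per-bio completion: record any error in ibr->ib_bio_err_cnt, release
 * the bio, and drop one pending reference via iblock_complete_cmd().
 */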
static void iblock_bio_done(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;

	/*
	 * Set -EIO if !BIO_UPTODATE and the passed err is still 0.
	 */
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
		err = -EIO;

	if (err != 0) {
		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
			" err: %d\n", bio, err);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic_inc();
	}

	bio_put(bio);

	iblock_complete_cmd(cmd);
}

static struct spc_ops iblock_spc_ops = {
	.execute_rw		= iblock_execute_rw,
	.execute_sync_cache	= iblock_execute_sync_cache,
	.execute_write_same	= iblock_execute_write_same,
};

static int iblock_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &iblock_spc_ops);
}

static struct se_subsystem_api iblock_template = {
	.name			= "iblock",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.write_cache_emulated	= 1,
	.fua_write_emulated	= 1,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.allocate_virtdevice	= iblock_allocate_virtdevice,
	.create_virtdevice	= iblock_create_virtdevice,
	.free_device		= iblock_free_device,
	.parse_cdb		= iblock_parse_cdb,
	.do_discard		= iblock_do_discard,
	.check_configfs_dev_params = iblock_check_configfs_dev_params,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_rev		= iblock_get_device_rev,
	.get_device_type	= iblock_get_device_type,
	.get_blocks		= iblock_get_blocks,
};

static int __init iblock_module_init(void)
{
	return transport_subsystem_register(&iblock_template);
}

static void iblock_module_exit(void)
{
	transport_subsystem_release(&iblock_template);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);