/*
 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
 * Copyright (C) 2014 Red Hat, Inc.
 * Copyright (C) 2015 Arrikto, Inc.
 * Copyright (C) 2017 Chinamobile, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/parser.h>
#include <linux/vmalloc.h>
#include <linux/uio_driver.h>
#include <linux/radix-tree.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/highmem.h>
#include <linux/configfs.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>

#include <linux/target_core_user.h>

/*
 * Define a shared-memory interface for LIO to pass SCSI commands and
 * data to userspace for processing. This is to allow backends that
 * are too complex for in-kernel support to be possible.
 *
 * It uses the UIO framework to do a lot of the device-creation and
 * introspection work for us.
 *
 * See the .h file for how the ring is laid out. Note that while the
 * command ring is defined, the particulars of the data area are
 * not. Offset values in the command entry point to other locations
 * internal to the mmap()ed area. There is separate space outside the
 * command ring for data buffers. This leaves maximum flexibility for
 * moving buffer allocations, or even page flipping or other
 * allocation techniques, without altering the command ring layout.
 *
 * SECURITY:
 * The user process must be assumed to be malicious. There's no way to
 * prevent it breaking the command ring protocol if it wants, but in
 * order to prevent other issues we must only ever read *data* from
 * the shared memory area, not offsets or sizes. This applies to
 * command ring entries as well as the mailbox. Extra code needed for
 * this may have a 'UAM' comment.
 */

#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)

/* For cmd area, the size is fixed 8MB */
#define CMDR_SIZE (8 * 1024 * 1024)

/*
 * For data area, the block size is PAGE_SIZE and
 * the total size is 256K * PAGE_SIZE.
 */
#define DATA_BLOCK_SIZE PAGE_SIZE
#define DATA_BLOCK_BITS (256 * 1024)
#define DATA_SIZE (DATA_BLOCK_BITS * DATA_BLOCK_SIZE)
#define DATA_BLOCK_INIT_BITS 128

/* The total size of the ring is 8M + 256K * PAGE_SIZE */
#define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)

/* Default maximum of the global data blocks (512K * PAGE_SIZE) */
#define TCMU_GLOBAL_MAX_BLOCKS (512 * 1024)

static u8 tcmu_kern_cmd_reply_supported;

static struct device *tcmu_root_device;

struct tcmu_hba {
	u32 host_id;
};

#define TCMU_CONFIG_LEN 256

struct tcmu_nl_cmd {
	/* wake up thread waiting for reply */
	struct completion complete;
	int cmd;
	int status;
};

struct tcmu_dev {
	struct list_head node;
	struct kref kref;
	struct se_device se_dev;

	char *name;
	struct se_hba *hba;

#define TCMU_DEV_BIT_OPEN 0
#define TCMU_DEV_BIT_BROKEN 1
	unsigned long flags;

	struct uio_info uio_info;

	struct inode *inode;

	struct tcmu_mailbox *mb_addr;
	size_t dev_size;
	u32 cmdr_size;
	u32 cmdr_last_cleaned;
	/* Offset of data area from start of mb */
	/* Must add data_off and mb_addr to get the address */
	size_t data_off;
	size_t data_size;

	wait_queue_head_t wait_cmdr;
	struct mutex cmdr_lock;

	bool waiting_global;
	uint32_t dbi_max;
	uint32_t dbi_thresh;
	DECLARE_BITMAP(data_bitmap, DATA_BLOCK_BITS);
	struct radix_tree_root data_blocks;

	struct idr commands;
	spinlock_t commands_lock;

	struct timer_list timeout;
	unsigned int cmd_time_out;

	spinlock_t nl_cmd_lock;
	struct tcmu_nl_cmd curr_nl_cmd;
	/* wake up threads waiting on curr_nl_cmd */
	wait_queue_head_t nl_cmd_wq;

	char dev_config[TCMU_CONFIG_LEN];

	int nl_reply_supported;
};

#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)

#define CMDR_OFF sizeof(struct tcmu_mailbox)

struct tcmu_cmd {
	struct se_cmd *se_cmd;
	struct tcmu_dev *tcmu_dev;

	uint16_t cmd_id;

	/* Can't use se_cmd when cleaning up expired cmds, because if
	   cmd has been completed then accessing se_cmd is off limits */
	uint32_t dbi_cnt;
	uint32_t dbi_cur;
	uint32_t *dbi;

	unsigned long deadline;

#define TCMU_CMD_BIT_EXPIRED 0
	unsigned long flags;
};

static struct task_struct *unmap_thread;
static wait_queue_head_t unmap_wait;
static DEFINE_MUTEX(root_udev_mutex);
static LIST_HEAD(root_udev);

static atomic_t global_db_count = ATOMIC_INIT(0);

static struct kmem_cache *tcmu_cmd_cache;

/* multicast group */
enum tcmu_multicast_groups {
	TCMU_MCGRP_CONFIG,
};

static const struct genl_multicast_group tcmu_mcgrps[] = {
	[TCMU_MCGRP_CONFIG] = { .name = "config", },
};

static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
	[TCMU_ATTR_DEVICE]	= { .type = NLA_STRING },
	[TCMU_ATTR_MINOR]	= { .type = NLA_U32 },
	[TCMU_ATTR_CMD_STATUS]	= { .type = NLA_S32 },
	[TCMU_ATTR_DEVICE_ID]	= { .type = NLA_U32 },
	[TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 },
};

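/*
 * Common handler for the netlink "command done" replies sent by the
 * userspace daemon: look up the tcmu device, record the reply status in
 * the device's current netlink command and complete the waiter.
 */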
static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
{
	struct se_device *dev;
	struct tcmu_dev *udev;
	struct tcmu_nl_cmd *nl_cmd;
	int dev_id, rc, ret = 0;
	bool is_removed = (completed_cmd == TCMU_CMD_REMOVED_DEVICE);

	if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
	    !info->attrs[TCMU_ATTR_DEVICE_ID]) {
		printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
		return -EINVAL;
	}

	dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
	rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);

	dev = target_find_device(dev_id, !is_removed);
	if (!dev) {
		printk(KERN_ERR "tcmu nl cmd %u/%u completion could not find device with dev id %u.\n",
		       completed_cmd, rc, dev_id);
		return -ENODEV;
	}
	udev = TCMU_DEV(dev);

	spin_lock(&udev->nl_cmd_lock);
	nl_cmd = &udev->curr_nl_cmd;

	pr_debug("genl cmd done got id %d curr %d done %d rc %d\n", dev_id,
		 nl_cmd->cmd, completed_cmd, rc);

	if (nl_cmd->cmd != completed_cmd) {
		printk(KERN_ERR "Mismatched commands (Expecting reply for %d. Current %d).\n",
		       completed_cmd, nl_cmd->cmd);
		ret = -EINVAL;
	} else {
		nl_cmd->status = rc;
	}

	spin_unlock(&udev->nl_cmd_lock);
	if (!is_removed)
		 target_undepend_item(&dev->dev_group.cg_item);
	if (!ret)
		complete(&nl_cmd->complete);
	return ret;
}

static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE);
}

static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE);
}

static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb,
				       struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE);
}

static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) {
		tcmu_kern_cmd_reply_supported =
			nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]);
		printk(KERN_INFO "tcmu daemon: command reply support %u.\n",
		       tcmu_kern_cmd_reply_supported);
	}

	return 0;
}

static const struct genl_ops tcmu_genl_ops[] = {
	{
		.cmd	= TCMU_CMD_SET_FEATURES,
		.flags	= GENL_ADMIN_PERM,
		.policy	= tcmu_attr_policy,
		.doit	= tcmu_genl_set_features,
	},
	{
		.cmd	= TCMU_CMD_ADDED_DEVICE_DONE,
		.flags	= GENL_ADMIN_PERM,
		.policy	= tcmu_attr_policy,
		.doit	= tcmu_genl_add_dev_done,
	},
	{
		.cmd	= TCMU_CMD_REMOVED_DEVICE_DONE,
		.flags	= GENL_ADMIN_PERM,
		.policy	= tcmu_attr_policy,
		.doit	= tcmu_genl_rm_dev_done,
	},
	{
		.cmd	= TCMU_CMD_RECONFIG_DEVICE_DONE,
		.flags	= GENL_ADMIN_PERM,
		.policy	= tcmu_attr_policy,
		.doit	= tcmu_genl_reconfig_dev_done,
	},
};

/* Our generic netlink family */
static struct genl_family tcmu_genl_family __ro_after_init = {
	.module = THIS_MODULE,
	.hdrsize = 0,
	.name = "TCM-USER",
	.version = 2,
	.maxattr = TCMU_ATTR_MAX,
	.mcgrps = tcmu_mcgrps,
	.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
	.netnsok = true,
	.ops = tcmu_genl_ops,
	.n_ops = ARRAY_SIZE(tcmu_genl_ops),
};

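/*
 * Track which data block indexes (dbi) a command owns: dbi_cur walks the
 * command's dbi[] array while blocks are assigned and later released.
 */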
#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
#define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0)
#define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index))
#define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++])

static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	uint32_t i;

	for (i = 0; i < len; i++)
		clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap);
}

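/*
 * Pick a free block index below dbi_thresh and make sure it is backed by a
 * page in the radix tree, allocating one if the global block limit allows.
 */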
static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
					struct tcmu_cmd *tcmu_cmd)
{
	struct page *page;
	int ret, dbi;

	dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
	if (dbi == udev->dbi_thresh)
		return false;

	page = radix_tree_lookup(&udev->data_blocks, dbi);
	if (!page) {
		if (atomic_add_return(1, &global_db_count) >
					TCMU_GLOBAL_MAX_BLOCKS) {
			atomic_dec(&global_db_count);
			return false;
		}

		/* try to get new page from the mm */
		page = alloc_page(GFP_KERNEL);
		if (!page)
			goto err_alloc;

		ret = radix_tree_insert(&udev->data_blocks, dbi, page);
		if (ret)
			goto err_insert;
	}

	if (dbi > udev->dbi_max)
		udev->dbi_max = dbi;

	set_bit(dbi, udev->data_bitmap);
	tcmu_cmd_set_dbi(tcmu_cmd, dbi);

	return true;
err_insert:
	__free_page(page);
err_alloc:
	atomic_dec(&global_db_count);
	return false;
}

static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
				  struct tcmu_cmd *tcmu_cmd)
{
	int i;

	udev->waiting_global = false;

	for (i = tcmu_cmd->dbi_cur; i < tcmu_cmd->dbi_cnt; i++) {
		if (!tcmu_get_empty_block(udev, tcmu_cmd))
			goto err;
	}
	return true;

err:
	udev->waiting_global = true;
	/* Try to wake up the unmap thread */
	wake_up(&unmap_wait);
	return false;
}

static inline struct page *
tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
{
	return radix_tree_lookup(&udev->data_blocks, dbi);
}

static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
{
	kfree(tcmu_cmd->dbi);
	kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
}

static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
{
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);

	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
		data_length += round_up(se_cmd->t_bidi_data_sg->length,
				DATA_BLOCK_SIZE);
	}

	return data_length;
}

static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
{
	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);

	return data_length / DATA_BLOCK_SIZE;
}

static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;

	tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
	if (!tcmu_cmd)
		return NULL;

	tcmu_cmd->se_cmd = se_cmd;
	tcmu_cmd->tcmu_dev = udev;

	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
	tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
	tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
				GFP_KERNEL);
	if (!tcmu_cmd->dbi) {
		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
		return NULL;
	}

	return tcmu_cmd;
}

static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
{
	unsigned long offset = offset_in_page(vaddr);
	void *start = vaddr - offset;

	size = round_up(size+offset, PAGE_SIZE);

	while (size) {
		flush_dcache_page(virt_to_page(start));
		start += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
}

/*
 * Some ring helper functions. We don't assume size is a power of 2 so
 * we can't use circ_buf.h.
 */
static inline size_t spc_used(size_t head, size_t tail, size_t size)
{
	int diff = head - tail;

	if (diff >= 0)
		return diff;
	else
		return size + diff;
}

static inline size_t spc_free(size_t head, size_t tail, size_t size)
{
	/* Keep 1 byte unused or we can't tell full from empty */
	return (size - spc_used(head, tail, size) - 1);
}

static inline size_t head_to_end(size_t head, size_t size)
{
	return size - head;
}

static inline void new_iov(struct iovec **iov, int *iov_cnt,
			   struct tcmu_dev *udev)
{
	struct iovec *iovec;

	if (*iov_cnt != 0)
		(*iov)++;
	(*iov_cnt)++;

	iovec = *iov;
	memset(iovec, 0, sizeof(struct iovec));
}

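/* Advance a ring head/tail index by 'used' bytes, wrapping at 'size'. */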
#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)

/* offset is relative to mb_addr */
static inline size_t get_block_offset_user(struct tcmu_dev *dev,
		int dbi, int remaining)
{
	return dev->data_off + dbi * DATA_BLOCK_SIZE +
		DATA_BLOCK_SIZE - remaining;
}

static inline size_t iov_tail(struct iovec *iov)
{
	return (size_t)iov->iov_base + iov->iov_len;
}

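/*
 * Copy the command's scatter-gather data into the shared data area and
 * build the iovecs (offsets relative to mb_addr) that userspace will use.
 * With copy_data false, only the blocks/iovecs are set up; no data is copied.
 */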
static int scatter_data_area(struct tcmu_dev *udev,
	struct tcmu_cmd *tcmu_cmd, struct scatterlist *data_sg,
	unsigned int data_nents, struct iovec **iov,
	int *iov_cnt, bool copy_data)
{
	int i, dbi;
	int block_remaining = 0;
	void *from, *to = NULL;
	size_t copy_bytes, to_offset, offset;
	struct scatterlist *sg;
	struct page *page;

	for_each_sg(data_sg, sg, data_nents, i) {
		int sg_remaining = sg->length;
		from = kmap_atomic(sg_page(sg)) + sg->offset;
		while (sg_remaining > 0) {
			if (block_remaining == 0) {
				if (to)
					kunmap_atomic(to);

				block_remaining = DATA_BLOCK_SIZE;
				dbi = tcmu_cmd_get_dbi(tcmu_cmd);
				page = tcmu_get_block_page(udev, dbi);
				to = kmap_atomic(page);
			}

			copy_bytes = min_t(size_t, sg_remaining,
					block_remaining);
			to_offset = get_block_offset_user(udev, dbi,
					block_remaining);

			if (*iov_cnt != 0 &&
			    to_offset == iov_tail(*iov)) {
				(*iov)->iov_len += copy_bytes;
			} else {
				new_iov(iov, iov_cnt, udev);
				(*iov)->iov_base = (void __user *)to_offset;
				(*iov)->iov_len = copy_bytes;
			}
			if (copy_data) {
				offset = DATA_BLOCK_SIZE - block_remaining;
				memcpy(to + offset,
				       from + sg->length - sg_remaining,
				       copy_bytes);
				tcmu_flush_dcache_range(to, copy_bytes);
			}
			sg_remaining -= copy_bytes;
			block_remaining -= copy_bytes;
		}
		kunmap_atomic(from - sg->offset);
	}
	if (to)
		kunmap_atomic(to);

	return 0;
}

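/*
 * Copy data back from the shared data area into the command's scatter-gather
 * list; for BIDI commands the Data-Out blocks are skipped first.
 */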
static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
			     bool bidi)
{
	struct se_cmd *se_cmd = cmd->se_cmd;
	int i, dbi;
	int block_remaining = 0;
	void *from = NULL, *to;
	size_t copy_bytes, offset;
	struct scatterlist *sg, *data_sg;
	struct page *page;
	unsigned int data_nents;
	uint32_t count = 0;

	if (!bidi) {
		data_sg = se_cmd->t_data_sg;
		data_nents = se_cmd->t_data_nents;
	} else {

		/*
		 * For bidi case, the first count blocks are for Data-Out
		 * buffer blocks, and before gathering the Data-In buffer
		 * the Data-Out buffer blocks should be discarded.
		 */
		count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);

		data_sg = se_cmd->t_bidi_data_sg;
		data_nents = se_cmd->t_bidi_data_nents;
	}

	tcmu_cmd_set_dbi_cur(cmd, count);

	for_each_sg(data_sg, sg, data_nents, i) {
		int sg_remaining = sg->length;
		to = kmap_atomic(sg_page(sg)) + sg->offset;
		while (sg_remaining > 0) {
			if (block_remaining == 0) {
				if (from)
					kunmap_atomic(from);

				block_remaining = DATA_BLOCK_SIZE;
				dbi = tcmu_cmd_get_dbi(cmd);
				page = tcmu_get_block_page(udev, dbi);
				from = kmap_atomic(page);
			}
			copy_bytes = min_t(size_t, sg_remaining,
					block_remaining);
			offset = DATA_BLOCK_SIZE - block_remaining;
			tcmu_flush_dcache_range(from, copy_bytes);
			memcpy(to + sg->length - sg_remaining, from + offset,
					copy_bytes);

			sg_remaining -= copy_bytes;
			block_remaining -= copy_bytes;
		}
		kunmap_atomic(to - sg->offset);
	}
	if (from)
		kunmap_atomic(from);
}

static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
{
	return DATA_BLOCK_SIZE * (thresh - bitmap_weight(bitmap, thresh));
}

/*
 * We can't queue a command until we have space available on the cmd ring *and*
 * space available on the data area.
 *
 * Called with ring lock held.
 */
static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
		size_t cmd_size, size_t data_needed)
{
	struct tcmu_mailbox *mb = udev->mb_addr;
	uint32_t blocks_needed = (data_needed + DATA_BLOCK_SIZE - 1)
				/ DATA_BLOCK_SIZE;
	size_t space, cmd_needed;
	u32 cmd_head;

	tcmu_flush_dcache_range(mb, sizeof(*mb));

	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */

	/*
	 * If cmd end-of-ring space is too small then we need space for a NOP plus
	 * original cmd - cmds are internally contiguous.
	 */
	if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
		cmd_needed = cmd_size;
	else
		cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);

	space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
	if (space < cmd_needed) {
		pr_debug("no cmd space: %u %u %u\n", cmd_head,
		       udev->cmdr_last_cleaned, udev->cmdr_size);
		return false;
	}

	/* try to check and get the data blocks as needed */
	space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
	if (space < data_needed) {
		unsigned long blocks_left = DATA_BLOCK_BITS - udev->dbi_thresh;
		unsigned long grow;

		if (blocks_left < blocks_needed) {
			pr_debug("no data space: only %lu available, but ask for %zu\n",
					blocks_left * DATA_BLOCK_SIZE,
					data_needed);
			return false;
		}

		/* Try to expand the thresh */
		if (!udev->dbi_thresh) {
			/* From idle state */
			uint32_t init_thresh = DATA_BLOCK_INIT_BITS;

			udev->dbi_thresh = max(blocks_needed, init_thresh);
		} else {
			/*
			 * Grow the data area by max(blocks needed,
			 * dbi_thresh / 2), but limited to the max
			 * DATA_BLOCK_BITS size.
			 */
			grow = max(blocks_needed, udev->dbi_thresh / 2);
			udev->dbi_thresh += grow;
			if (udev->dbi_thresh > DATA_BLOCK_BITS)
				udev->dbi_thresh = DATA_BLOCK_BITS;
		}
	}

	return tcmu_get_empty_blocks(udev, cmd);
}

static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
{
	return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]),
			sizeof(struct tcmu_cmd_entry));
}

static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
					   size_t base_command_size)
{
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t command_size;

	command_size = base_command_size +
		round_up(scsi_command_size(se_cmd->t_task_cdb),
				TCMU_OP_ALIGN_SIZE);

	WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));

	return command_size;
}

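/* Allocate a cmd_id for the command and arm the per-device timeout, if any. */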
static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	unsigned long tmo = udev->cmd_time_out;
	int cmd_id;

	if (tcmu_cmd->cmd_id)
		return 0;

	cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
	if (cmd_id < 0) {
		pr_err("tcmu: Could not allocate cmd id.\n");
		return cmd_id;
	}
	tcmu_cmd->cmd_id = cmd_id;

	if (!tmo)
		return 0;

	tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
	mod_timer(&udev->timeout, tcmu_cmd->deadline);
	return 0;
}

static sense_reason_t
tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t base_command_size, command_size;
	struct tcmu_mailbox *mb;
	struct tcmu_cmd_entry *entry;
	struct iovec *iov;
	int iov_cnt, ret;
	uint32_t cmd_head;
	uint64_t cdb_off;
	bool copy_to_data_area;
	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/*
	 * Must be a certain minimum size for response sense info, but
	 * also may be larger if the iov array is large.
	 *
	 * We prepare as many iovs as possible for potential uses here,
	 * because it's expensive to tell how many regions are freed in
	 * the bitmap & global data pool, as the size calculated here
	 * will only be used to do the checks.
	 *
	 * The size will be recalculated later as actually needed to save
	 * cmd area memories.
	 */
	base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
	command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);

	mutex_lock(&udev->cmdr_lock);

	mb = udev->mb_addr;
	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
	if ((command_size > (udev->cmdr_size / 2)) ||
	    data_length > udev->data_size) {
		pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
			"cmd ring/data area\n", command_size, data_length,
			udev->cmdr_size, udev->data_size);
		mutex_unlock(&udev->cmdr_lock);
		return TCM_INVALID_CDB_FIELD;
	}

	while (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) {
		int ret;
		DEFINE_WAIT(__wait);

		prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE);

		pr_debug("sleeping for ring space\n");
		mutex_unlock(&udev->cmdr_lock);
		if (udev->cmd_time_out)
			ret = schedule_timeout(
					msecs_to_jiffies(udev->cmd_time_out));
		else
			ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
		finish_wait(&udev->wait_cmdr, &__wait);
		if (!ret) {
			pr_warn("tcmu: command timed out\n");
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		mutex_lock(&udev->cmdr_lock);

		/* We dropped cmdr_lock, cmd_head is stale */
		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
	}

	/* Insert a PAD if end-of-ring space is too small */
	if (head_to_end(cmd_head, udev->cmdr_size) < command_size) {
		size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);

		entry = (void *) mb + CMDR_OFF + cmd_head;
		tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
		tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
		entry->hdr.cmd_id = 0; /* not used for PAD */
		entry->hdr.kflags = 0;
		entry->hdr.uflags = 0;
		tcmu_flush_dcache_range(entry, sizeof(*entry));

		UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
		tcmu_flush_dcache_range(mb, sizeof(*mb));

		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
		WARN_ON(cmd_head != 0);
	}

	entry = (void *) mb + CMDR_OFF + cmd_head;
	memset(entry, 0, command_size);
	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);

	/* Handle allocating space from the data area */
	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
	iov = &entry->req.iov[0];
	iov_cnt = 0;
	copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
		|| se_cmd->se_cmd_flags & SCF_BIDI);
	ret = scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg,
				se_cmd->t_data_nents, &iov, &iov_cnt,
				copy_to_data_area);
	if (ret) {
		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
		mutex_unlock(&udev->cmdr_lock);

		pr_err("tcmu: alloc and scatter data failed\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	entry->req.iov_cnt = iov_cnt;

	/* Handle BIDI commands */
	iov_cnt = 0;
	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		iov++;
		ret = scatter_data_area(udev, tcmu_cmd,
					se_cmd->t_bidi_data_sg,
					se_cmd->t_bidi_data_nents,
					&iov, &iov_cnt, false);
		if (ret) {
			tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
			mutex_unlock(&udev->cmdr_lock);

			pr_err("tcmu: alloc and scatter bidi data failed\n");
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}
	}
	entry->req.iov_bidi_cnt = iov_cnt;

	ret = tcmu_setup_cmd_timer(tcmu_cmd);
	if (ret) {
		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
		mutex_unlock(&udev->cmdr_lock);
		return TCM_OUT_OF_RESOURCES;
	}
	entry->hdr.cmd_id = tcmu_cmd->cmd_id;

	/*
	 * Recalculate the command's base size and size according
	 * to the actual needs
	 */
	base_command_size = tcmu_cmd_get_base_cmd_size(entry->req.iov_cnt +
						       entry->req.iov_bidi_cnt);
	command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);

	tcmu_hdr_set_len(&entry->hdr.len_op, command_size);

	/* All offsets relative to mb_addr, not start of entry! */
	cdb_off = CMDR_OFF + cmd_head + base_command_size;
	memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
	entry->req.cdb_off = cdb_off;
	tcmu_flush_dcache_range(entry, sizeof(*entry));

	UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
	tcmu_flush_dcache_range(mb, sizeof(*mb));
	mutex_unlock(&udev->cmdr_lock);

	/* TODO: only if FLUSH and FUA? */
	uio_event_notify(&udev->uio_info);

	if (udev->cmd_time_out)
		mod_timer(&udev->timeout, round_jiffies_up(jiffies +
			  msecs_to_jiffies(udev->cmd_time_out)));

	return TCM_NO_SENSE;
}

static sense_reason_t
tcmu_queue_cmd(struct se_cmd *se_cmd)
{
	struct tcmu_cmd *tcmu_cmd;
	sense_reason_t ret;

	tcmu_cmd = tcmu_alloc_cmd(se_cmd);
	if (!tcmu_cmd)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	ret = tcmu_queue_cmd_ring(tcmu_cmd);
	if (ret != TCM_NO_SENSE) {
		pr_err("TCMU: Could not queue command\n");

		tcmu_free_cmd(tcmu_cmd);
	}

	return ret;
}

static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
{
	struct se_cmd *se_cmd = cmd->se_cmd;
	struct tcmu_dev *udev = cmd->tcmu_dev;

	/*
	 * cmd has been completed already from timeout, just reclaim
	 * data area space and free cmd
	 */
	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
		goto out;

	tcmu_cmd_reset_dbi_cur(cmd);

	if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
		pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
			cmd->se_cmd);
		entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
	} else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
		transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
	} else if (se_cmd->se_cmd_flags & SCF_BIDI) {
		/* Get Data-In buffer before clean up */
		gather_data_area(udev, cmd, true);
	} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
		gather_data_area(udev, cmd, false);
	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
		/* TODO: */
	} else if (se_cmd->data_direction != DMA_NONE) {
		pr_warn("TCMU: data direction was %d!\n",
			se_cmd->data_direction);
	}

	target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);

out:
	cmd->se_cmd = NULL;
	tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
	tcmu_free_cmd(cmd);
}

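/*
 * Walk the command ring from cmdr_last_cleaned to the tail written by
 * userspace, completing each finished command and skipping PAD entries.
 */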
static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
{
	struct tcmu_mailbox *mb;
	int handled = 0;

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
		pr_err("ring broken, not handling completions\n");
		return 0;
	}

	mb = udev->mb_addr;
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {

		struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
		struct tcmu_cmd *cmd;

		tcmu_flush_dcache_range(entry, sizeof(*entry));

		if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
			UPDATE_HEAD(udev->cmdr_last_cleaned,
				    tcmu_hdr_get_len(entry->hdr.len_op),
				    udev->cmdr_size);
			continue;
		}
		WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);

		spin_lock(&udev->commands_lock);
		cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
		spin_unlock(&udev->commands_lock);

		if (!cmd) {
			pr_err("cmd_id not found, ring is broken\n");
			set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
			break;
		}

		tcmu_handle_completion(cmd, entry);

		UPDATE_HEAD(udev->cmdr_last_cleaned,
			    tcmu_hdr_get_len(entry->hdr.len_op),
			    udev->cmdr_size);

		handled++;
	}

	if (mb->cmd_tail == mb->cmd_head)
		del_timer(&udev->timeout); /* no more pending cmds */

	wake_up(&udev->wait_cmdr);

	return handled;
}

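/* idr_for_each() callback: time out a still-pending command. */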
static int tcmu_check_expired_cmd(int id, void *p, void *data)
{
	struct tcmu_cmd *cmd = p;

	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
		return 0;

	if (!time_after(jiffies, cmd->deadline))
		return 0;

	set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
	target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION);
	cmd->se_cmd = NULL;

	return 0;
}

static void tcmu_device_timedout(struct timer_list *t)
{
	struct tcmu_dev *udev = from_timer(udev, t, timeout);
	unsigned long flags;

	spin_lock_irqsave(&udev->commands_lock, flags);
	idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
	spin_unlock_irqrestore(&udev->commands_lock, flags);

	/* Try to wake up the unmap thread */
	wake_up(&unmap_wait);

	/*
	 * We don't need to wakeup threads on wait_cmdr since they have their
	 * own timeout.
	 */
}

static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct tcmu_hba *tcmu_hba;

	tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
	if (!tcmu_hba)
		return -ENOMEM;

	tcmu_hba->host_id = host_id;
	hba->hba_ptr = tcmu_hba;

	return 0;
}

static void tcmu_detach_hba(struct se_hba *hba)
{
	kfree(hba->hba_ptr);
	hba->hba_ptr = NULL;
}

static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
{
	struct tcmu_dev *udev;

	udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
	if (!udev)
		return NULL;
	kref_init(&udev->kref);

	udev->name = kstrdup(name, GFP_KERNEL);
	if (!udev->name) {
		kfree(udev);
		return NULL;
	}

	udev->hba = hba;
	udev->cmd_time_out = TCMU_TIME_OUT;

	init_waitqueue_head(&udev->wait_cmdr);
	mutex_init(&udev->cmdr_lock);

	idr_init(&udev->commands);
	spin_lock_init(&udev->commands_lock);

	timer_setup(&udev->timeout, tcmu_device_timedout, 0);

	init_waitqueue_head(&udev->nl_cmd_wq);
	spin_lock_init(&udev->nl_cmd_lock);

	INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);

	return &udev->se_dev;
}

static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
{
	struct tcmu_dev *tcmu_dev = container_of(info, struct tcmu_dev, uio_info);

	mutex_lock(&tcmu_dev->cmdr_lock);
	tcmu_handle_completions(tcmu_dev);
	mutex_unlock(&tcmu_dev->cmdr_lock);

	return 0;
}

/*
 * mmap code from uio.c. Copied here because we want to hook mmap()
 * and this stuff must come along.
 */
static int tcmu_find_mem_index(struct vm_area_struct *vma)
{
	struct tcmu_dev *udev = vma->vm_private_data;
	struct uio_info *info = &udev->uio_info;

	if (vma->vm_pgoff < MAX_UIO_MAPS) {
		if (info->mem[vma->vm_pgoff].size == 0)
			return -1;
		return (int)vma->vm_pgoff;
	}
	return -1;
}

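/*
 * Look up (or, as a fallback, allocate) the page backing a data block for
 * the mmap fault path; blocks outside the command's iovecs get a zeroed page.
 */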
static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
{
	struct page *page;
	int ret;

	mutex_lock(&udev->cmdr_lock);
	page = tcmu_get_block_page(udev, dbi);
	if (likely(page)) {
		mutex_unlock(&udev->cmdr_lock);
		return page;
	}

	/*
	 * Normally we should not get here: this only happens when
	 * userspace has touched blocks that are outside the tcmu_cmd's
	 * data iov[], in which case a zeroed page is returned.
	 */
	pr_warn("Block(%u) out of cmd's iov[] has been touched!\n", dbi);
	pr_warn("This is most likely a userspace bug, please check the daemon!\n");

	if (dbi >= udev->dbi_thresh) {
		/* Extend udev->dbi_thresh to dbi + 1 */
		udev->dbi_thresh = dbi + 1;
		udev->dbi_max = dbi;
	}

	page = radix_tree_lookup(&udev->data_blocks, dbi);
	if (!page) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page) {
			mutex_unlock(&udev->cmdr_lock);
			return NULL;
		}

		ret = radix_tree_insert(&udev->data_blocks, dbi, page);
		if (ret) {
			mutex_unlock(&udev->cmdr_lock);
			__free_page(page);
			return NULL;
		}

		/*
		 * Since this case is rare in page fault routine, here we
		 * will allow the global_db_count >= TCMU_GLOBAL_MAX_BLOCKS
		 * to reduce possible page fault call trace.
		 */
		atomic_inc(&global_db_count);
	}
	mutex_unlock(&udev->cmdr_lock);

	return page;
}

static int tcmu_vma_fault(struct vm_fault *vmf)
{
	struct tcmu_dev *udev = vmf->vma->vm_private_data;
	struct uio_info *info = &udev->uio_info;
	struct page *page;
	unsigned long offset;
	void *addr;

	int mi = tcmu_find_mem_index(vmf->vma);
	if (mi < 0)
		return VM_FAULT_SIGBUS;

	/*
	 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
	 * to use mem[N].
	 */
	offset = (vmf->pgoff - mi) << PAGE_SHIFT;

	if (offset < udev->data_off) {
		/* For the vmalloc()ed cmd area pages */
		addr = (void *)(unsigned long)info->mem[mi].addr + offset;
		page = vmalloc_to_page(addr);
	} else {
		uint32_t dbi;

		/* For the dynamically growing data area pages */
		dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE;
		page = tcmu_try_get_block_page(udev, dbi);
		if (!page)
			return VM_FAULT_NOPAGE;
	}

	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct tcmu_vm_ops = {
	.fault = tcmu_vma_fault,
};

static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &tcmu_vm_ops;

	vma->vm_private_data = udev;

	/* Ensure the mmap is exactly the right size */
	if (vma_pages(vma) != (TCMU_RING_SIZE >> PAGE_SHIFT))
		return -EINVAL;

	return 0;
}

static int tcmu_open(struct uio_info *info, struct inode *inode)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	/* O_EXCL not supported for char devs, so fake it? */
	if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
		return -EBUSY;

	udev->inode = inode;
	kref_get(&udev->kref);

	pr_debug("open\n");

	return 0;
}

static void tcmu_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct tcmu_dev *udev = TCMU_DEV(dev);

	kfree(udev->uio_info.name);
	kfree(udev->name);
	kfree(udev);
}

static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
{
	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
		kmem_cache_free(tcmu_cmd_cache, cmd);
		return 0;
	}
	return -EINVAL;
}

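/* Free the data area pages in [start, end) and drop the global block count. */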
static void tcmu_blocks_release(struct radix_tree_root *blocks,
				int start, int end)
{
	int i;
	struct page *page;

	for (i = start; i < end; i++) {
		page = radix_tree_delete(blocks, i);
		if (page) {
			__free_page(page);
			atomic_dec(&global_db_count);
		}
	}
}

static void tcmu_dev_kref_release(struct kref *kref)
{
	struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
	struct se_device *dev = &udev->se_dev;
	struct tcmu_cmd *cmd;
	bool all_expired = true;
	int i;

	vfree(udev->mb_addr);
	udev->mb_addr = NULL;

	/* Upper layer should drain all requests before calling this */
	spin_lock_irq(&udev->commands_lock);
	idr_for_each_entry(&udev->commands, cmd, i) {
		if (tcmu_check_and_free_pending_cmd(cmd) != 0)
			all_expired = false;
	}
	idr_destroy(&udev->commands);
	spin_unlock_irq(&udev->commands_lock);
	WARN_ON(!all_expired);

	mutex_lock(&udev->cmdr_lock);
	tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1);
	mutex_unlock(&udev->cmdr_lock);

	call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
}

static int tcmu_release(struct uio_info *info, struct inode *inode)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);

	pr_debug("close\n");
	/* release ref from open */
	kref_put(&udev->kref, tcmu_dev_kref_release);
	return 0;
}

static void tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
{
	struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;

	if (!tcmu_kern_cmd_reply_supported)
		return;

	if (udev->nl_reply_supported <= 0)
		return;

relock:
	spin_lock(&udev->nl_cmd_lock);

	if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
		spin_unlock(&udev->nl_cmd_lock);
		pr_debug("sleeping for open nl cmd\n");
		wait_event(udev->nl_cmd_wq, (nl_cmd->cmd == TCMU_CMD_UNSPEC));
		goto relock;
	}

	memset(nl_cmd, 0, sizeof(*nl_cmd));
	nl_cmd->cmd = cmd;
	init_completion(&nl_cmd->complete);

	spin_unlock(&udev->nl_cmd_lock);
}

static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
{
	struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
	int ret;
	DEFINE_WAIT(__wait);

	if (!tcmu_kern_cmd_reply_supported)
		return 0;

	if (udev->nl_reply_supported <= 0)
		return 0;

	pr_debug("sleeping for nl reply\n");
	wait_for_completion(&nl_cmd->complete);

	spin_lock(&udev->nl_cmd_lock);
	nl_cmd->cmd = TCMU_CMD_UNSPEC;
	ret = nl_cmd->status;
	nl_cmd->status = 0;
	spin_unlock(&udev->nl_cmd_lock);

	wake_up_all(&udev->nl_cmd_wq);

	return ret;
}

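/*
 * Send a TCM-USER netlink multicast event to userspace and, if reply
 * support is enabled, wait for the daemon to report completion status.
 */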
static int tcmu_netlink_event(struct tcmu_dev *udev, enum tcmu_genl_cmd cmd,
			      int reconfig_attr, const void *reconfig_data)
{
	struct sk_buff *skb;
	void *msg_header;
	int ret = -ENOMEM;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return ret;

	msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
	if (!msg_header)
		goto free_skb;

	ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name);
	if (ret < 0)
		goto free_skb;

	ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor);
	if (ret < 0)
		goto free_skb;

	ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index);
	if (ret < 0)
		goto free_skb;

	if (cmd == TCMU_CMD_RECONFIG_DEVICE) {
		switch (reconfig_attr) {
		case TCMU_ATTR_DEV_CFG:
			ret = nla_put_string(skb, reconfig_attr, reconfig_data);
			break;
		case TCMU_ATTR_DEV_SIZE:
			ret = nla_put_u64_64bit(skb, reconfig_attr,
						*((u64 *)reconfig_data),
						TCMU_ATTR_PAD);
			break;
		case TCMU_ATTR_WRITECACHE:
			ret = nla_put_u8(skb, reconfig_attr,
					  *((u8 *)reconfig_data));
			break;
		default:
			BUG();
		}

		if (ret < 0)
			goto free_skb;
	}

	genlmsg_end(skb, msg_header);

	tcmu_init_genl_cmd_reply(udev, cmd);

	ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
				TCMU_MCGRP_CONFIG, GFP_KERNEL);
	/* We don't care if no one is listening */
	if (ret == -ESRCH)
		ret = 0;
	if (!ret)
		ret = tcmu_wait_genl_cmd_reply(udev);

	return ret;
free_skb:
	nlmsg_free(skb);
	return ret;
}

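/* Rebuild the uio_info name from host_id, device name and dev_config. */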
static int tcmu_update_uio_info(struct tcmu_dev *udev)
{
	struct tcmu_hba *hba = udev->hba->hba_ptr;
	struct uio_info *info;
	size_t size, used;
	char *str;

	info = &udev->uio_info;
	size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name,
			udev->dev_config);
	size += 1; /* for \0 */
	str = kmalloc(size, GFP_KERNEL);
	if (!str)
		return -ENOMEM;

	used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name);
	if (udev->dev_config[0])
		snprintf(str + used, size - used, "/%s", udev->dev_config);

B
	/* If the old string exists, free it */
	kfree(info->name);
	info->name = str;

}

static int tcmu_configure_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	struct uio_info *info;
	struct tcmu_mailbox *mb;
	int ret = 0;

	ret = tcmu_update_uio_info(udev);
	if (ret)
		return ret;

	info = &udev->uio_info;

	udev->mb_addr = vzalloc(CMDR_SIZE);
	if (!udev->mb_addr) {
		ret = -ENOMEM;
		goto err_vzalloc;
	}

	/* mailbox fits in first part of CMDR space */
	udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
	udev->data_off = CMDR_SIZE;
	udev->data_size = DATA_SIZE;
	udev->dbi_thresh = 0; /* Default in Idle state */
	udev->waiting_global = false;

	/* Initialise the mailbox of the ring buffer */
	mb = udev->mb_addr;
	mb->version = TCMU_MAILBOX_VERSION;
	mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC;
	mb->cmdr_off = CMDR_OFF;
	mb->cmdr_size = udev->cmdr_size;

	WARN_ON(!PAGE_ALIGNED(udev->data_off));
	WARN_ON(udev->data_size % PAGE_SIZE);
	WARN_ON(udev->data_size % DATA_BLOCK_SIZE);

	info->version = __stringify(TCMU_MAILBOX_VERSION);

	info->mem[0].name = "tcm-user command & data buffer";
	info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
	info->mem[0].size = TCMU_RING_SIZE;
	info->mem[0].memtype = UIO_MEM_NONE;

	info->irqcontrol = tcmu_irqcontrol;
	info->irq = UIO_IRQ_CUSTOM;

	info->mmap = tcmu_mmap;
	info->open = tcmu_open;
	info->release = tcmu_release;

	ret = uio_register_device(tcmu_root_device, info);
	if (ret)
		goto err_register;

	/* User can set hw_block_size before enable the device */
	if (dev->dev_attrib.hw_block_size == 0)
		dev->dev_attrib.hw_block_size = 512;
	/* Other attributes can be configured in userspace */
	if (!dev->dev_attrib.hw_max_sectors)
		dev->dev_attrib.hw_max_sectors = 128;
	if (!dev->dev_attrib.emulate_write_cache)
		dev->dev_attrib.emulate_write_cache = 0;
	dev->dev_attrib.hw_queue_depth = 128;

	/* If user didn't explicitly disable netlink reply support, use
	 * module scope setting.
	 */
	if (udev->nl_reply_supported >= 0)
		udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;

	/*
	 * Get a ref incase userspace does a close on the uio device before
	 * LIO has initiated tcmu_free_device.
	 */
	kref_get(&udev->kref);

	ret = tcmu_netlink_event(udev, TCMU_CMD_ADDED_DEVICE, 0, NULL);
	if (ret)
		goto err_netlink;

	mutex_lock(&root_udev_mutex);
	list_add(&udev->node, &root_udev);
	mutex_unlock(&root_udev_mutex);

	return 0;

err_netlink:
	kref_put(&udev->kref, tcmu_dev_kref_release);
	uio_unregister_device(&udev->uio_info);
err_register:
	vfree(udev->mb_addr);
	udev->mb_addr = NULL;
err_vzalloc:
	kfree(info->name);
	info->name = NULL;

	return ret;
}

static bool tcmu_dev_configured(struct tcmu_dev *udev)
{
	return udev->uio_info.uio_dev ? true : false;
}

static void tcmu_free_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	/* release ref from init */
	kref_put(&udev->kref, tcmu_dev_kref_release);
}

static void tcmu_destroy_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	del_timer_sync(&udev->timeout);

	mutex_lock(&root_udev_mutex);
	list_del(&udev->node);
	mutex_unlock(&root_udev_mutex);

	tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL);

	uio_unregister_device(&udev->uio_info);

	/* release ref from configure */
	kref_put(&udev->kref, tcmu_dev_kref_release);
}

enum {
	Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
	Opt_nl_reply_supported, Opt_err,
};

static match_table_t tokens = {
	{Opt_dev_config, "dev_config=%s"},
	{Opt_dev_size, "dev_size=%u"},
	{Opt_hw_block_size, "hw_block_size=%u"},
	{Opt_hw_max_sectors, "hw_max_sectors=%u"},
	{Opt_nl_reply_supported, "nl_reply_supported=%d"},
	{Opt_err, NULL}
};

static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
{
	unsigned long tmp_ul;
	char *arg_p;
	int ret;

	arg_p = match_strdup(arg);
	if (!arg_p)
		return -ENOMEM;

	ret = kstrtoul(arg_p, 0, &tmp_ul);
	kfree(arg_p);
	if (ret < 0) {
		pr_err("kstrtoul() failed for dev attrib\n");
		return ret;
	}
	if (!tmp_ul) {
		pr_err("dev attrib must be nonzero\n");
		return -EINVAL;
	}
	*dev_attrib = tmp_ul;
	return 0;
}

static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	char *orig, *ptr, *opts, *arg_p;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_dev_config:
			if (match_strlcpy(udev->dev_config, &args[0],
					  TCMU_CONFIG_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
			break;
		case Opt_dev_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size);
			kfree(arg_p);
			if (ret < 0)
				pr_err("kstrtoul() failed for dev_size=\n");
			break;
		case Opt_hw_block_size:
			ret = tcmu_set_dev_attrib(&args[0],
					&(dev->dev_attrib.hw_block_size));
			break;
		case Opt_hw_max_sectors:
			ret = tcmu_set_dev_attrib(&args[0],
					&(dev->dev_attrib.hw_max_sectors));
			break;
		case Opt_nl_reply_supported:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoint(arg_p, 0, &udev->nl_reply_supported);
			kfree(arg_p);
			if (ret < 0)
				pr_err("kstrtoint() failed for nl_reply_supported=\n");
			break;
		default:
			break;
		}

		if (ret)
			break;
	}

	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	ssize_t bl = 0;

	bl = sprintf(b + bl, "Config: %s ",
		     udev->dev_config[0] ? udev->dev_config : "NULL");
	bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size);

	return bl;
}

static sector_t tcmu_get_blocks(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	return div_u64(udev->dev_size - dev->dev_attrib.block_size,
		       dev->dev_attrib.block_size);
}

static sense_reason_t
tcmu_parse_cdb(struct se_cmd *cmd)
{
	return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
}

static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
}

static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
				       size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = container_of(da->da_dev,
					struct tcmu_dev, se_dev);
	u32 val;
	int ret;

	if (da->da_dev->export_count) {
		pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
		return -EINVAL;
	}

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	udev->cmd_time_out = val * MSEC_PER_SEC;
	return count;
}
CONFIGFS_ATTR(tcmu_, cmd_time_out);

static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
}

static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
				     size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	int ret, len;

	len = strlen(page);
	if (!len || len > TCMU_CONFIG_LEN - 1)
		return -EINVAL;

	/* Check if device has been configured before */
	if (tcmu_dev_configured(udev)) {
		ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
					 TCMU_ATTR_DEV_CFG, page);
		if (ret) {
			pr_err("Unable to reconfigure device\n");
			return ret;
		}
		strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);

		ret = tcmu_update_uio_info(udev);
		if (ret)
			return ret;
		return count;
	}
	strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);

	return count;
}
CONFIGFS_ATTR(tcmu_, dev_config);

static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%zu\n", udev->dev_size);
}

static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
				   size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	u64 val;
	int ret;

	ret = kstrtou64(page, 0, &val);
	if (ret < 0)
		return ret;

	/* Check if device has been configured before */
	if (tcmu_dev_configured(udev)) {
		ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
					 TCMU_ATTR_DEV_SIZE, &val);
		if (ret) {
			pr_err("Unable to reconfigure device\n");
			return ret;
		}
	}
	udev->dev_size = val;
	return count;
}
CONFIGFS_ATTR(tcmu_, dev_size);

static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
		char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
}

static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	s8 val;
	int ret;

	ret = kstrtos8(page, 0, &val);
	if (ret < 0)
		return ret;

	udev->nl_reply_supported = val;
	return count;
}
CONFIGFS_ATTR(tcmu_, nl_reply_supported);

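/*
 * emulate_write_cache toggles the reported volatile write cache setting.
 * When the device is already configured, userspace is notified of the
 * change via a TCMU_CMD_RECONFIG_DEVICE netlink event.
 */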
static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
					     char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);

	return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
}

static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
					      const char *page, size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	u8 val;
	int ret;

	ret = kstrtou8(page, 0, &val);
	if (ret < 0)
		return ret;

	/* Check if device has been configured before */
	if (tcmu_dev_configured(udev)) {
		ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
					 TCMU_ATTR_WRITECACHE, &val);
		if (ret) {
			pr_err("Unable to reconfigure device\n");
			return ret;
		}
	}

	da->emulate_write_cache = val;
	return count;
}
CONFIGFS_ATTR(tcmu_, emulate_write_cache);

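/*
 * TCMU-specific configfs attributes; appended to the common passthrough
 * attributes in tcmu_module_init() to form the tcmu_attrs array.
 */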
static struct configfs_attribute *tcmu_attrib_attrs[] = {
	&tcmu_attr_cmd_time_out,
	&tcmu_attr_dev_config,
	&tcmu_attr_dev_size,
	&tcmu_attr_emulate_write_cache,
	&tcmu_attr_nl_reply_supported,
	NULL,
};

static struct configfs_attribute **tcmu_attrs;

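/*
 * Backend ops registered with the target core. TRANSPORT_FLAG_PASSTHROUGH
 * means SCSI command emulation is left to the userspace handler rather
 * than done in the kernel.
 */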
static struct target_backend_ops tcmu_ops = {
	.name			= "user",
	.owner			= THIS_MODULE,
	.transport_flags	= TRANSPORT_FLAG_PASSTHROUGH,
	.attach_hba		= tcmu_attach_hba,
	.detach_hba		= tcmu_detach_hba,
	.alloc_device		= tcmu_alloc_device,
	.configure_device	= tcmu_configure_device,
	.destroy_device		= tcmu_destroy_device,
	.free_device		= tcmu_free_device,
	.parse_cdb		= tcmu_parse_cdb,
	.set_configfs_dev_params = tcmu_set_configfs_dev_params,
	.show_configfs_dev_params = tcmu_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= tcmu_get_blocks,
	.tb_dev_attrib_attrs	= NULL,
};

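/*
 * Reclaim data-area blocks that are no longer in use: for each device,
 * complete any finished commands, shrink dbi_thresh/dbi_max down to the
 * highest block still set in the bitmap, unmap the freed range from
 * userspace and release the backing pages.
 */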
static void find_free_blocks(void)
{
	struct tcmu_dev *udev;
	loff_t off;
	uint32_t start, end, block;

	mutex_lock(&root_udev_mutex);
	list_for_each_entry(udev, &root_udev, node) {
		mutex_lock(&udev->cmdr_lock);

		/* Try to complete the finished commands first */
		tcmu_handle_completions(udev);

		/* Skip udevs that are waiting for the global pool or are idle */
		if (udev->waiting_global || !udev->dbi_thresh) {
			mutex_unlock(&udev->cmdr_lock);
			continue;
		}

		end = udev->dbi_max + 1;
		block = find_last_bit(udev->data_bitmap, end);
		if (block == udev->dbi_max) {
			/*
			 * The last bit is dbi_max, so there is
			 * no need to shrink any blocks.
			 */
			mutex_unlock(&udev->cmdr_lock);
			continue;
		} else if (block == end) {
			/* The current udev will go to the idle state */
			udev->dbi_thresh = start = 0;
			udev->dbi_max = 0;
		} else {
			udev->dbi_thresh = start = block + 1;
			udev->dbi_max = block;
		}

		/* Truncate the data area starting at offset 'off' */
		off = udev->data_off + start * DATA_BLOCK_SIZE;
		unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);

		/* Release the block pages */
		tcmu_blocks_release(&udev->data_blocks, start, end);
		mutex_unlock(&udev->cmdr_lock);
	}
	mutex_unlock(&root_udev_mutex);
}

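/*
 * Called after blocks have been freed: wake up devices that are blocked
 * waiting for free data blocks. The scan stops at the first device that
 * is not waiting on the global pool.
 */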
static void run_cmdr_queues(void)
{
	struct tcmu_dev *udev;

	/*
	 * Try to wake up udevs that are waiting for
	 * the global data block pool.
	 */
	mutex_lock(&root_udev_mutex);
	list_for_each_entry(udev, &root_udev, node) {
		mutex_lock(&udev->cmdr_lock);
		if (!udev->waiting_global) {
			mutex_unlock(&udev->cmdr_lock);
			break;
		}
		mutex_unlock(&udev->cmdr_lock);

		wake_up(&udev->wait_cmdr);
	}
	mutex_unlock(&root_udev_mutex);
}

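/*
 * Kernel thread that sleeps on unmap_wait until woken, then reclaims
 * unused data blocks and wakes up devices waiting for them.
 */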
static int unmap_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		DEFINE_WAIT(__wait);

		prepare_to_wait(&unmap_wait, &__wait, TASK_INTERRUPTIBLE);
		schedule();
		finish_wait(&unmap_wait, &__wait);

		if (kthread_should_stop())
			break;

		find_free_blocks();
		run_cmdr_queues();
	}

	return 0;
}

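/*
 * Module init: set up the command cache, root device and netlink family,
 * build the combined attribute array (passthrough attrs + tcmu attrs +
 * NULL terminator), register the backend and start the unmap thread.
 */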
static int __init tcmu_module_init(void)
{
	int ret, i, k, len = 0;

	BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);

	tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
				sizeof(struct tcmu_cmd),
				__alignof__(struct tcmu_cmd),
				0, NULL);
	if (!tcmu_cmd_cache)
		return -ENOMEM;

	tcmu_root_device = root_device_register("tcm_user");
	if (IS_ERR(tcmu_root_device)) {
		ret = PTR_ERR(tcmu_root_device);
		goto out_free_cache;
	}

	ret = genl_register_family(&tcmu_genl_family);
	if (ret < 0) {
		goto out_unreg_device;
	}

	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
		len += sizeof(struct configfs_attribute *);
	}
	for (i = 0; tcmu_attrib_attrs[i] != NULL; i++) {
		len += sizeof(struct configfs_attribute *);
	}
	len += sizeof(struct configfs_attribute *);

	tcmu_attrs = kzalloc(len, GFP_KERNEL);
	if (!tcmu_attrs) {
		ret = -ENOMEM;
		goto out_unreg_genl;
	}

	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
		tcmu_attrs[i] = passthrough_attrib_attrs[i];
	}
	for (k = 0; tcmu_attrib_attrs[k] != NULL; k++) {
		tcmu_attrs[i] = tcmu_attrib_attrs[k];
		i++;
	}
	tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;

	ret = transport_backend_register(&tcmu_ops);
	if (ret)
		goto out_attrs;

	init_waitqueue_head(&unmap_wait);
	unmap_thread = kthread_run(unmap_thread_fn, NULL, "tcmu_unmap");
	if (IS_ERR(unmap_thread)) {
		ret = PTR_ERR(unmap_thread);
		goto out_unreg_transport;
	}

	return 0;

out_unreg_transport:
	target_backend_unregister(&tcmu_ops);
out_attrs:
	kfree(tcmu_attrs);
out_unreg_genl:
	genl_unregister_family(&tcmu_genl_family);
out_unreg_device:
	root_device_unregister(tcmu_root_device);
out_free_cache:
	kmem_cache_destroy(tcmu_cmd_cache);

	return ret;
}

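/* Module exit: tear everything down in the reverse order of tcmu_module_init(). */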
static void __exit tcmu_module_exit(void)
{
	kthread_stop(unmap_thread);
	target_backend_unregister(&tcmu_ops);
	kfree(tcmu_attrs);
	genl_unregister_family(&tcmu_genl_family);
	root_device_unregister(tcmu_root_device);
	kmem_cache_destroy(tcmu_cmd_cache);
}

MODULE_DESCRIPTION("TCM USER subsystem plugin");
MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
MODULE_LICENSE("GPL");

module_init(tcmu_module_init);
module_exit(tcmu_module_exit);