/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

DEFINE_MUTEX(g_device_mutex);
LIST_HEAD(g_device_list);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

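/*
 * transport_lookup_cmd_lun():
 *
 * Map @unpacked_lun to the se_lun and se_device for this command using the
 * session's node ACL deve table under rcu_read_lock(), rejecting writes to
 * READ_ONLY mappings and taking a percpu se_lun->lun_ref for the life of
 * the command.  When no MappedLUN=0 exists for this initiator port, the
 * lookup falls back to the TPG's write-protected virtual LUN 0.  Per-deve
 * and per-device I/O statistics are updated along the way.
 */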
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return TCM_NON_EXISTENT_LUN;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			rcu_read_unlock();
			return TCM_WRITE_PROTECTED;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		se_lun = rcu_dereference(deve->se_lun);
		se_cmd->se_lun = rcu_dereference(deve->se_lun);
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return TCM_NON_EXISTENT_LUN;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

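/*
 * transport_lookup_tmr_lun():
 *
 * Resolve @unpacked_lun for a task management request: set se_tmr->tmr_lun,
 * se_cmd->se_lun and se_tmr->tmr_dev from the node ACL deve table, and add
 * the TMR to the device's dev_tmr_list.  Returns -ENODEV when no mapping
 * exists.
 */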
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return -ENODEV;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		se_tmr->tmr_lun = rcu_dereference(deve->se_lun);
		se_cmd->se_lun = rcu_dereference(deve->se_lun);
		se_lun = rcu_dereference(deve->se_lun);
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		return -ENODEV;
	}
	/*
	 * XXX: Add percpu se_lun->lun_ref reference count for TMR
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	if (cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY)
		return true;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = (deve && deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = rcu_dereference(deve->se_lun);
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (lun->lun_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}

void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}

void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve) {
		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->device_list_lock held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u32 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

/*      core_enable_device_list_for_node():
 *
 *	Install a new RCU protected se_dev_entry mapping @mapped_lun to @lun
 *	for @nacl, or replace an existing entry when a demo-mode (dynamic)
 *	mapping is converted into an explicit MappedLUN ACL.
 */
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	atomic_set(&new->ua_count, 0);
	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)
		new->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	else
		new->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;

	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
			       " for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		BUG_ON(orig->se_lun_acl != NULL);

		rcu_assign_pointer(new->se_lun, lun);
		rcu_assign_pointer(new->se_lun_acl, lun_acl);
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock_bh(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock_bh(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		kfree_rcu(orig, rcu_head);
		return 0;
	}

	rcu_assign_pointer(new->se_lun, lun);
	rcu_assign_pointer(new->se_lun_acl, lun_acl);
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock_bh(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock_bh(&lun->lun_deve_lock);

	return 0;
}

/*
 *	Called with se_node_acl->lun_entry_mutex held.
 */
void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock_bh(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock_bh(&lun->lun_deve_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	rcu_assign_pointer(orig->se_lun, NULL);
	rcu_assign_pointer(orig->se_lun_acl, NULL);
	orig->lun_flags = 0;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
}

/*      core_clear_lun_from_tpg():
 *
 *	Walk every node ACL in @tpg and disable any se_dev_entry that still
 *	references @lun.
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

			if (lun != tmp_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

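/*
 * Allocate the next free, non-zero 16-bit RELATIVE TARGET PORT IDENTIFIER
 * for @lun under dev->se_port_lock, retrying across 16-bit wrap until a
 * value not already used by another se_lun on dev->dev_sep_list is found.
 */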
int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
	struct se_lun *tmp;

	spin_lock(&dev->se_port_lock);
	if (dev->export_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
				" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return -ENOSPC;
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code      Description
	 * 0h        Reserved
	 * 1h        Relative port 1, historically known as port A
	 * 2h        Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	lun->lun_rtpi = dev->dev_rpti_counter++;
	if (!lun->lun_rtpi)
		goto again;

	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (lun->lun_rtpi == tmp->lun_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return 0;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
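	/*
	 * For example, with a 512 byte block_size and 4K PAGE_SIZE the
	 * alignment becomes 8 sectors, so a max_sectors of 1023 is rounded
	 * down to 1016; with a 4K block_size the alignment is 1 and
	 * max_sectors is returned unchanged.
	 */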
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

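/*
 * core_dev_add_lun():
 *
 * Activate @dev as @lun within @tpg with default READ_WRITE access, and,
 * when the fabric runs in demo mode, map the new LUN into every existing
 * dynamically generated node ACL.
 */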
int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun,
				TRANSPORT_LUNFLAGS_READ_WRITE, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/*      core_dev_del_lun():
 *
 *	Deactivate @lun and remove it from @tpg.
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	core_tpg_remove_lun(tpg, lun);
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u32 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
		 nacl->initiatorname);

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	u32 lun_access)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}

int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %u\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

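/*
 * target_alloc_device():
 *
 * Ask the backend (IBLOCK, FILEIO, RAMDISK, pSCSI, ..) to allocate a new
 * se_device via hba->backend->ops->alloc_device(), then initialize the
 * generic lists, locks, default dev_attrib values and the internal
 * xcopy_lun used for EXTENDED COPY I/O.
 */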
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->dev_link_magic = SE_DEV_LINK_MAGIC;
	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->prot_length = sizeof(struct se_dif_v1_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	INIT_LIST_HEAD(&dev->g_dev_node);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	atomic_set(&dev->dev_ordered_id, 0);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_ref_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	return dev;
}

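/*
 * target_configure_device():
 *
 * Second stage setup after target_alloc_device(): call the backend's
 * configure_device(), derive block_size and queue_depth from the hw_*
 * values, align hw_max_sectors to PAGE_SIZE, set up ALUA and the TMR
 * workqueue, preload INQUIRY defaults for non-passthrough backends, and
 * add the device to g_device_list with DF_CONFIGURED set.
 */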
int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret;

	if (dev->dev_flags & DF_CONFIGURED) {
		pr_err("se_dev->se_dev_ptr already set for storage"
				" object\n");
		return -EEXIST;
	}

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out;
	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
			dev->transport->name);
		ret = -ENOMEM;
		goto out_free_alua;
	}

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 */
	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
		strncpy(&dev->t10_wwn.revision[0],
			dev->transport->inquiry_rev, 4);
	}

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	mutex_lock(&g_device_mutex);
	list_add_tail(&dev->g_dev_node, &g_device_list);
	mutex_unlock(&g_device_mutex);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_free_alua:
	core_alua_free_lu_gp_mem(dev);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (dev->dev_flags & DF_CONFIGURED) {
		destroy_workqueue(dev->tmr_wq);

		mutex_lock(&g_device_mutex);
		list_del(&dev->g_dev_node);
		mutex_unlock(&g_device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	dev->transport->free_device(dev);
}

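/*
 * Create the internal rd_mcp HBA and a nullio ramdisk backed "virt_lun0"
 * device, stored in g_lun0_dev for tpg.c, which backs the per-TPG
 * tpg_virt_lun0 used to answer REPORT LUNS when an initiator has no
 * MappedLUN=0.
 */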
int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}


void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;

	/*
	 * Clear a LUN set in the CDB if the initiator talking to us spoke an
	 * old standards version, as we can't assume the underlying device
	 * won't choke up on it.
	 */
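	/*
	 * For example (hypothetical CDB), a SCSI-2 initiator may encode LUN 1
	 * in bits 7:5 of byte 1, e.g. cdb[1] == 0x35; the mask below leaves
	 * only the low five bits (0x15) for the backend device to interpret.
	 */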
	switch (cdb[0]) {
	case READ_10: /* SBC - RDProtect */
	case READ_12: /* SBC - RDProtect */
	case READ_16: /* SBC - RDProtect */
	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
	case VERIFY: /* SBC - VRProtect */
	case VERIFY_16: /* SBC - VRProtect */
	case WRITE_VERIFY: /* SBC - VRProtect */
	case WRITE_VERIFY_12: /* SBC - VRProtect */
	case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
		break;
	default:
		cdb[1] &= 0x1f; /* clear logical unit number */
		break;
	}

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case 0x8e: /* WRITE_VERIFY_16 */
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case 0x0c: /* WRITE_VERIFY_32 */
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);