/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

DEFINE_MUTEX(g_device_mutex);
LIST_HEAD(g_device_list);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

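/*
 * Resolve @unpacked_lun to a struct se_lun through the node ACL's
 * RCU-protected lun_entry_hlist.  On a match this takes a percpu
 * se_lun->lun_ref, updates per-deve and per-device statistics, and
 * rejects writes to READ_ONLY mappings with TCM_WRITE_PROTECTED.
 * When no MappedLUN exists, LUN 0 falls back to the TPG's virtual
 * LUN 0 so that REPORT_LUNS and other read-only CDBs still work.
 */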
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			rcu_read_unlock();
			return TCM_WRITE_PROTECTED;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		se_lun = rcu_dereference(deve->se_lun);
		se_cmd->se_lun = rcu_dereference(deve->se_lun);
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return TCM_NON_EXISTENT_LUN;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

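/*
 * TMR counterpart of transport_lookup_cmd_lun(): resolve @unpacked_lun
 * for a task management request and add the se_tmr_req to the device's
 * dev_tmr_list.  Returns -ENODEV if the initiator has no mapping for
 * the requested LUN.
 */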
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		se_tmr->tmr_lun = rcu_dereference(deve->se_lun);
		se_cmd->se_lun = rcu_dereference(deve->se_lun);
		se_lun = rcu_dereference(deve->se_lun);
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		return -ENODEV;
	}
	/*
	 * XXX: Add percpu se_lun->lun_ref reference count for TMR
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

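/*
 * Return true if the se_dev_entry backing cmd->orig_fe_lun is currently
 * mapped READ_ONLY for this session's node ACL.
 */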
bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = (deve && deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = rcu_dereference(deve->se_lun);
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (lun->lun_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}

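/*
 * Disable and free every remaining se_dev_entry mapped for this
 * initiator node, used when the node ACL is being released.
 */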
void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}

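/*
 * Flip an existing MappedLUN between READ_WRITE and READ_ONLY for
 * @nacl, if a se_dev_entry exists for @mapped_lun.
 */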
void core_update_device_list_access(
	u64 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve) {
		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->lun_entry_mutex held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

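/*
 * kref release callback for deve->pr_kref; wakes up a waiter blocked
 * on deve->pr_comp in core_{enable,disable}_device_list_for_node().
 */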
void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

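/*
 * Queue a REPORTED LUNS DATA HAS CHANGED unit attention (ASC 0x3f) on
 * each se_dev_entry of @nacl, optionally skipping @new, after a LUN
 * mapping has been added or removed.
 */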
static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}

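/*
 * Install a new se_dev_entry mapping @mapped_lun to @lun for @nacl.
 * If an entry already exists (a demo-mode dynamic MappedLUN being
 * converted to an explicit NodeACL mapping), the original entry is
 * replaced under nacl->lun_entry_mutex and released via kfree_rcu()
 * once outstanding SPEC_I_PT / REGISTER_AND_MOVE references drop.
 */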
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	atomic_set(&new->ua_count, 0);
	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)
		new->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	else
		new->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;

	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
			       " for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		BUG_ON(orig->se_lun_acl != NULL);

		rcu_assign_pointer(new->se_lun, lun);
		rcu_assign_pointer(new->se_lun_acl, lun_acl);
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		target_luns_data_has_changed(nacl, new, true);
		kfree_rcu(orig, rcu_head);
		return 0;
	}

	rcu_assign_pointer(new->se_lun, lun);
	rcu_assign_pointer(new->se_lun_acl, lun_acl);
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	target_luns_data_has_changed(nacl, new, true);
	return 0;
}

/*
 *	Called with se_node_acl->lun_entry_mutex held.
 */
void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	rcu_assign_pointer(orig->se_lun, NULL);
	rcu_assign_pointer(orig->se_lun_acl, NULL);
	orig->lun_flags = 0;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
	target_luns_data_has_changed(nacl, NULL, false);
}

/*      core_clear_lun_from_tpg():
 *
 *      Walk all node ACLs in @tpg and disable any se_dev_entry still
 *      mapping @lun, called when the LUN is removed from the portal
 *      group.
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

			if (lun != tmp_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

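/*
 * Assign the next available RELATIVE TARGET PORT IDENTIFIER to @lun,
 * retrying on 16-bit counter wrap until the value is unique across
 * all ports exported from @dev.
 */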
int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
	struct se_lun *tmp;

	spin_lock(&dev->se_port_lock);
	if (dev->export_count == 0x0000ffff) {
		pr_warn("Reached dev->export_count =="
				" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return -ENOSPC;
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code      Description
	 * 0h        Reserved
	 * 1h        Relative port 1, historically known as port A
	 * 2h        Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	lun->lun_rtpi = dev->dev_rpti_counter++;
	if (!lun->lun_rtpi)
		goto again;

	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (lun->lun_rtpi == tmp->lun_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return 0;
}

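/*
 * Free all t10_vpd descriptors collected for this device's T10 WWN.
 */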
static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

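/*
 * Activate @lun for @dev within @tpg as READ_WRITE, and update the LUN
 * maps of dynamically generated node ACLs when demo-mode is enabled.
 */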
int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun,
				TRANSPORT_LUNFLAGS_READ_WRITE, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/*      core_dev_del_lun():
 *
 *      Deactivate @lun and release it from @tpg via core_tpg_remove_lun().
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	core_tpg_remove_lun(tpg, lun);
}

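/*
 * Allocate a se_lun_acl for an explicit MappedLUN.  The mapping only
 * becomes active once core_dev_add_initiator_node_lun_acl() is called
 * on the returned entry.
 */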
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
		 nacl->initiatorname);

	return lacl;
}

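/*
 * Activate a previously allocated se_lun_acl against @lun, demoting
 * the requested access to READ_ONLY when the underlying LUN itself is
 * read-only, and re-enable any matching APTPL PR pre-registrations.
 */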
int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	u32 lun_access)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
666 667 668 669 670 671 672
	return 0;
}

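/*
 * Tear down an explicit MappedLUN: look up the matching se_dev_entry
 * and disable it under nacl->lun_entry_mutex.
 */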
int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

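/*
 * Log the backend's INQUIRY vendor/model/revision strings in the
 * familiar Linux/SCSI format, substituting spaces for non-printable
 * bytes.
 */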
static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

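/*
 * Allocate a backend se_device via hba->backend->ops->alloc_device()
 * and initialize the generic target-core state: lists, locks, default
 * device attributes, and the embedded xcopy_lun used by EXTENDED COPY.
 */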
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->dev_link_magic = SE_DEV_LINK_MAGIC;
	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->prot_length = sizeof(struct se_dif_v1_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	INIT_LIST_HEAD(&dev->g_dev_node);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	atomic_set(&dev->dev_ordered_id, 0);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_ref_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	return dev;
}

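/*
 * Second stage of device bringup after target_alloc_device(): let the
 * backend configure itself, align hw_max_sectors to PAGE_SIZE, set up
 * ALUA and the per-device tmr workqueue, and preload the INQUIRY
 * defaults for non-passthrough backends before marking the device
 * DF_CONFIGURED.
 */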
int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret;

	if (dev->dev_flags & DF_CONFIGURED) {
		pr_err("se_dev->se_dev_ptr already set for storage"
				" object\n");
		return -EEXIST;
	}

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out;
	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
			dev->transport->name);
		ret = -ENOMEM;
		goto out_free_alua;
	}

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 */
	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
		strncpy(&dev->t10_wwn.revision[0],
			dev->transport->inquiry_rev, 4);
	}

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	mutex_lock(&g_device_mutex);
	list_add_tail(&dev->g_dev_node, &g_device_list);
	mutex_unlock(&g_device_mutex);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_free_alua:
	core_alua_free_lu_gp_mem(dev);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

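/*
 * Release a se_device created by target_alloc_device(), tearing down
 * the tmr workqueue and global/HBA bookkeeping when it was configured,
 * then dropping ALUA, PR and VPD state before handing the memory back
 * to the backend via free_device().
 */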
void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (dev->dev_flags & DF_CONFIGURED) {
		destroy_workqueue(dev->tmr_wq);

		mutex_lock(&g_device_mutex);
		list_del(&dev->g_dev_node);
		mutex_unlock(&g_device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	dev->transport->free_device(dev);
}

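/*
 * Create the global rd_mcp backed "virt_lun0" device used to service
 * REPORT_LUNS when an initiator has no explicit LUN 0 mapping.
 */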
int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}


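/*
 * Undo core_dev_setup_virtual_lun0() at module teardown.
 */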
void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;

	/*
	 * Clear a lun set in the cdb if the initiator talking to us spoke
	 * an old standards version, as we can't assume the underlying device
	 * won't choke up on it.
	 */
	switch (cdb[0]) {
	case READ_10: /* SBC - RDProtect */
	case READ_12: /* SBC - RDProtect */
	case READ_16: /* SBC - RDProtect */
	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
	case VERIFY: /* SBC - VRProtect */
	case VERIFY_16: /* SBC - VRProtect */
	case WRITE_VERIFY: /* SBC - VRProtect */
	case WRITE_VERIFY_12: /* SBC - VRProtect */
	case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
		break;
	default:
		cdb[1] &= 0x1f; /* clear logical unit number */
		break;
	}

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case 0x8e: /* WRITE_VERIFY_16 */
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case 0x0c: /* WRITE_VERIFY_32 */
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);