/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

DEFINE_MUTEX(g_device_mutex);
LIST_HEAD(g_device_list);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

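/*
 * transport_lookup_cmd_lun() - map an incoming se_cmd to its backend device.
 *
 * Resolves @unpacked_lun through the session's node ACL under RCU, updates
 * the per-deve and per-device byte counters, and pins the LUN with a percpu
 * lun_ref for the life of the command.  Falls back to the TPG's virtual
 * LUN 0 so REPORT_LUNS still works when no MappedLUN=0 exists for this
 * initiator port.
 */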
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			rcu_read_unlock();
			return TCM_WRITE_PROTECTED;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		se_lun = rcu_dereference(deve->se_lun);
		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return TCM_NON_EXISTENT_LUN;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = se_lun;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

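/*
 * transport_lookup_tmr_lun() - resolve the LUN for a task management request
 * and link the TMR onto the device's dev_tmr_list.
 */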
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		se_lun = rcu_dereference(deve->se_lun);
		se_tmr->tmr_lun = se_lun;
		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		return -ENODEV;
	}
	/*
	 * XXX: Add percpu se_lun->lun_ref reference count for TMR
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = se_cmd->se_dev;

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

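/* Return true if the session's deve mapping for the command's LUN is read-only. */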
bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = (deve && deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = rcu_dereference(deve->se_lun);
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (lun->lun_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}

void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}

void core_update_device_list_access(
	u64 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve) {
		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->lun_entry_mutex held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

/*      core_enable_device_list_for_node():
 *
 *	Install a new se_dev_entry mapping @mapped_lun to @lun for @nacl,
 *	replacing any existing dynamic entry via RCU.
 */
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	atomic_set(&new->ua_count, 0);
	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)
		new->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	else
		new->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;

	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
			       " for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		BUG_ON(orig->se_lun_acl != NULL);

		rcu_assign_pointer(new->se_lun, lun);
		rcu_assign_pointer(new->se_lun_acl, lun_acl);
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock_bh(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock_bh(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		kfree_rcu(orig, rcu_head);
		return 0;
	}

	rcu_assign_pointer(new->se_lun, lun);
	rcu_assign_pointer(new->se_lun_acl, lun_acl);
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock_bh(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock_bh(&lun->lun_deve_lock);

	return 0;
}

/*
 *	Called with se_node_acl->lun_entry_mutex held.
 */
void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock_bh(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock_bh(&lun->lun_deve_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	rcu_assign_pointer(orig->se_lun, NULL);
	rcu_assign_pointer(orig->se_lun_acl, NULL);
	orig->lun_flags = 0;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
}

/*      core_clear_lun_from_tpg():
 *
 *	Walk all NodeACLs in @tpg and disable any se_dev_entry still
 *	mapped to @lun.
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

			if (lun != tmp_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

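/*
 * core_alloc_rtpi() - assign a device-unique RELATIVE TARGET PORT IDENTIFIER
 * to @lun, skipping the reserved value zero and retrying across 16-bit
 * counter wrap until no LUN on dev_sep_list already holds the identifier.
 */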
int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
	struct se_lun *tmp;

	spin_lock(&dev->se_port_lock);
	if (dev->export_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
				" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return -ENOSPC;
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code      Description
	 * 0h        Reserved
	 * 1h        Relative port 1, historically known as port A
	 * 2h        Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	lun->lun_rtpi = dev->dev_rpti_counter++;
	if (!lun->lun_rtpi)
		goto again;

	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (lun->lun_rtpi == tmp->lun_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return 0;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

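/*
 * Round max_sectors down to a multiple of (PAGE_SIZE / block_size) so the
 * largest I/O always covers a whole number of pages.
 */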
static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

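/*
 * core_dev_add_lun() - export @dev as @lun within @tpg, and map the new LUN
 * into any dynamically generated (demo-mode) node ACLs.
 */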
int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun,
				TRANSPORT_LUNFLAGS_READ_WRITE, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/*      core_dev_del_lun():
 *
 *	Deactivate @lun and remove it from @tpg.
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	core_tpg_remove_lun(tpg, lun);
}

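/*
 * Allocate and initialize an se_lun_acl for @mapped_lun; on failure *ret is
 * set to a negative errno and NULL is returned.
 */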
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
		 nacl->initiatorname);

	return lacl;
}

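/*
 * Wire an initialized se_lun_acl to @lun, demoting the requested access to
 * read-only when the LUN itself is read-only, then enable the mapping and
 * replay any matching APTPL persistent reservation pre-registrations.
 */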
int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	u32 lun_access)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}

int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

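/*
 * target_alloc_device() - ask the backend to allocate an se_device, then
 * initialize the generic lists, locks, and default attributes, including
 * the embedded xcopy_lun used for local EXTENDED COPY I/O.
 */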
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->dev_link_magic = SE_DEV_LINK_MAGIC;
	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->prot_length = sizeof(struct se_dif_v1_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	INIT_LIST_HEAD(&dev->g_dev_node);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	atomic_set(&dev->dev_ordered_id, 0);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_ref_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	return dev;
}

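/*
 * target_configure_device() - invoke the backend configure_device() hook,
 * derive generic attributes from the hardware limits, set up ALUA and the
 * TMR workqueue, and mark the device DF_CONFIGURED.
 */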
int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret;

	if (dev->dev_flags & DF_CONFIGURED) {
		pr_err("se_dev->se_dev_ptr already set for storage"
				" object\n");
		return -EEXIST;
	}

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out;
	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
			dev->transport->name);
		ret = -ENOMEM;
		goto out_free_alua;
	}

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 */
	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
		strncpy(&dev->t10_wwn.revision[0],
			dev->transport->inquiry_rev, 4);
	}

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	mutex_lock(&g_device_mutex);
	list_add_tail(&dev->g_dev_node, &g_device_list);
	mutex_unlock(&g_device_mutex);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_free_alua:
	core_alua_free_lu_gp_mem(dev);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (dev->dev_flags & DF_CONFIGURED) {
		destroy_workqueue(dev->tmr_wq);

		mutex_lock(&g_device_mutex);
		list_del(&dev->g_dev_node);
		mutex_unlock(&g_device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	dev->transport->free_device(dev);
}

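/*
 * Create the global rd_mcp backed device used as virtual LUN 0 for
 * initiator ports without an explicit MappedLUN=0.
 */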
int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}


void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;

	/*
	 * Clear a lun set in the cdb if the initiator talking to us spoke
	 * an old standards version, as we can't assume the underlying device
	 * won't choke up on it.
	 */
	switch (cdb[0]) {
	case READ_10: /* SBC - RDProtect */
	case READ_12: /* SBC - RDProtect */
	case READ_16: /* SBC - RDProtect */
	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
	case VERIFY: /* SBC - VRProtect */
	case VERIFY_16: /* SBC - VRProtect */
	case WRITE_VERIFY: /* SBC - VRProtect */
	case WRITE_VERIFY_12: /* SBC - VRProtect */
	case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
		break;
	default:
		cdb[1] &= 0x1f; /* clear logical unit number */
		break;
	}

	/*
	 * For REPORT LUNS we always need to emulate the response; for
	 * everything else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case 0x8e: /* WRITE_VERIFY_16 */
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case 0x0c: /* WRITE_VERIFY_32 */
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);