/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

DEFINE_MUTEX(g_device_mutex);
LIST_HEAD(g_device_list);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

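/*
 * Translate the fabric-provided unpacked_lun into the se_lun/se_device that
 * backs it, using the RCU-protected se_dev_entry hlist of the session's
 * NodeACL.  A percpu se_lun->lun_ref reference is taken (lun_ref_active),
 * and the TPG's virtual LUN 0 is used as a write-protected fallback when no
 * MappedLUN 0 exists for this initiator.
 */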
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;
	sense_reason_t ret = TCM_NO_SENSE;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		se_lun = rcu_dereference(deve->se_lun);
		se_cmd->se_lun = rcu_dereference(deve->se_lun);
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			rcu_read_unlock();
			ret = TCM_WRITE_PROTECTED;
			goto ref_dev;
		}
	}
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return TCM_NON_EXISTENT_LUN;
		}

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;

		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE)) {
			ret = TCM_WRITE_PROTECTED;
			goto ref_dev;
		}
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
ref_dev:
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

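/*
 * TMR variant of the LUN lookup: resolve unpacked_lun to the se_lun and
 * se_device for a task management request and add the se_tmr_req to the
 * device's dev_tmr_list.  Unlike the command path, no percpu lun_ref is
 * taken here (see the XXX note below).
 */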
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		se_tmr->tmr_lun = rcu_dereference(deve->se_lun);
		se_cmd->se_lun = rcu_dereference(deve->se_lun);
		se_lun = rcu_dereference(deve->se_lun);
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		return -ENODEV;
	}
	/*
	 * XXX: Add percpu se_lun->lun_ref reference count for TMR
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

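/*
 * Return true when the command's original FE LUN is mapped read-only for
 * this session's NodeACL.
 */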
bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = (deve && deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = rcu_dereference(deve->se_lun);
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (lun->lun_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}

void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}

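/*
 * Flip an existing MappedLUN entry between READ-WRITE and READ-ONLY access
 * under the NodeACL's lun_entry_mutex.
 */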
void core_update_device_list_access(
	u64 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve) {
		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->lun_entry_mutex held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}

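/*
 * Allocate and publish a new se_dev_entry for a MappedLUN.  If a demo-mode
 * (dynamic) entry already exists for the same mapped_lun, it is replaced in
 * the RCU hlist and on lun->lun_deve_list, and freed via kfree_rcu() once
 * any in-flight SPEC_I_PT / REGISTER_AND_MOVE PR references have dropped.
 */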
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	atomic_set(&new->ua_count, 0);
	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)
		new->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	else
		new->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;

	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
			       " for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		BUG_ON(orig->se_lun_acl != NULL);

		rcu_assign_pointer(new->se_lun, lun);
		rcu_assign_pointer(new->se_lun_acl, lun_acl);
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		target_luns_data_has_changed(nacl, new, true);
		kfree_rcu(orig, rcu_head);
		return 0;
	}

	rcu_assign_pointer(new->se_lun, lun);
	rcu_assign_pointer(new->se_lun_acl, lun_acl);
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	target_luns_data_has_changed(nacl, new, true);
	return 0;
}

/*
 *	Called with se_node_acl->lun_entry_mutex held.
 */
void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
421 422
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
423 424 425
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	orig->lun_flags = 0;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	rcu_assign_pointer(orig->se_lun, NULL);
	rcu_assign_pointer(orig->se_lun_acl, NULL);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
	target_luns_data_has_changed(nacl, NULL, false);
}

/*      core_clear_lun_from_tpg():
 *
 *	Walk every NodeACL in the TPG and disable any MappedLUN entry that
 *	still references this lun.
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

			if (lun != tmp_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

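/*
 * Assign the next free RELATIVE TARGET PORT IDENTIFIER to the lun, skipping
 * the reserved value 0 and any identifier already in use on
 * dev->dev_sep_list, and failing with -ENOSPC once the 16-bit space of
 * exported ports is exhausted.
 */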
int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
	struct se_lun *tmp;

	spin_lock(&dev->se_port_lock);
	if (dev->export_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
				" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return -ENOSPC;
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code      Description
	 * 0h        Reserved
	 * 1h        Relative port 1, historically known as port A
	 * 2h        Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	lun->lun_rtpi = dev->dev_rpti_counter++;
	if (!lun->lun_rtpi)
		goto again;

	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (lun->lun_rtpi == tmp->lun_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return 0;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

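/*
 * Activate a LUN for this TPG/device as READ-WRITE and, when the fabric
 * runs in demo-mode, propagate the new mapping to any dynamically generated
 * NodeACLs.
 */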
int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun,
				TRANSPORT_LUNFLAGS_READ_WRITE, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/*      core_dev_del_lun():
 *
 *	Deactivate the lun and remove it from the owning TPG.
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	core_tpg_remove_lun(tpg, lun);
}

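/*
 * Allocate a se_lun_acl for an explicit MappedLUN under a NodeACL; the
 * mapping itself is enabled by core_dev_add_initiator_node_lun_acl() below.
 */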
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	u32 lun_access)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		nacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}

int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		nacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

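/*
 * First stage of device setup: allocate a backend-specific se_device via
 * hba->backend->ops->alloc_device() and initialize the generic lists, locks,
 * default attributes and the embedded xcopy_lun.
 */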
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->dev_link_magic = SE_DEV_LINK_MAGIC;
	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->prot_length = sizeof(struct t10_pi_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	INIT_LIST_HEAD(&dev->g_dev_node);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.unmap_zeroes_data =
				DA_UNMAP_ZEROES_DATA_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_ref_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	return dev;
}

/*
 * Check if the underlying struct block_device request_queue supports
 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
 * in ATA and we need to set TPE=1
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct request_queue *q, int block_size)
{
	if (!blk_queue_discard(q))
		return false;

	attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) /
								block_size;
	/*
	 * Currently hardcoded to 1 in Linux/SCSI code..
	 */
	attrib->max_unmap_block_desc_count = 1;
	attrib->unmap_granularity = q->limits.discard_granularity / block_size;
	attrib->unmap_granularity_alignment = q->limits.discard_alignment /
								block_size;
	attrib->unmap_zeroes_data = q->limits.discard_zeroes_data;
	return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);

/*
 * Convert from blocksize advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer.
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
	switch (dev->dev_attrib.block_size) {
	case 4096:
		return lb << 3;
	case 2048:
		return lb << 2;
	case 1024:
		return lb << 1;
	default:
		return lb;
	}
}
EXPORT_SYMBOL(target_to_linux_sector);

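/*
 * Second stage of device setup: let the backend configure itself, derive the
 * generic block_size/queue_depth/max_sectors attributes, set up ALUA and the
 * tmr workqueue, and add the device to g_device_list before marking it
 * DF_CONFIGURED.
 */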
int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret;

	if (dev->dev_flags & DF_CONFIGURED) {
		pr_err("se_dev->se_dev_ptr already set for storage"
				" object\n");
		return -EEXIST;
	}

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out;
	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
			dev->transport->name);
		ret = -ENOMEM;
		goto out_free_alua;
	}

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 */
	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
		strncpy(&dev->t10_wwn.revision[0],
			dev->transport->inquiry_rev, 4);
	}

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	mutex_lock(&g_device_mutex);
	list_add_tail(&dev->g_dev_node, &g_device_list);
	mutex_unlock(&g_device_mutex);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_free_alua:
	core_alua_free_lu_gp_mem(dev);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (dev->dev_flags & DF_CONFIGURED) {
		destroy_workqueue(dev->tmr_wq);

		mutex_lock(&g_device_mutex);
		list_del(&dev->g_dev_node);
		mutex_unlock(&g_device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	dev->transport->free_device(dev);
}

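/*
 * Create the rd_mcp backed "virt_lun0" device used by tpg.c as the default
 * backing store for each TPG's virtual LUN 0, so REPORT LUNS can be serviced
 * for initiators without an explicit MappedLUN 0.
 */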
int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}


void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;

	/*
	 * Clear a lun set in the cdb if the initiator talking to us spoke
	 * an old standards version, as we can't assume the underlying device
	 * won't choke up on it.
	 */
	switch (cdb[0]) {
	case READ_10: /* SBC - RDProtect */
	case READ_12: /* SBC - RDProtect */
	case READ_16: /* SBC - RDProtect */
	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
	case VERIFY: /* SBC - VRProtect */
	case VERIFY_16: /* SBC - VRProtect */
	case WRITE_VERIFY: /* SBC - VRProtect */
	case WRITE_VERIFY_12: /* SBC - VRProtect */
	case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
		break;
	default:
		cdb[1] &= 0x1f; /* clear logical unit number */
		break;
	}

	/*
	 * For REPORT LUNS we always need to emulate the response; for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case 0x8e: /* WRITE_VERIFY_16 */
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case 0x0c: /* WRITE_VERIFY_32 */
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);