/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_pr.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/*	core_clear_initiator_node_from_tpg():
 *
 *	Walk @nacl's device list and disable every mapped LUN the initiator
 *	currently has access to within @tpg.
 */
static void core_clear_initiator_node_from_tpg(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	int i;
	struct se_dev_entry *deve;
	struct se_lun *lun;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entry's device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}

		lun = deve->se_lun;
		spin_unlock_irq(&nacl->device_list_lock);
		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/*	__core_tpg_get_initiator_node_acl():
 *
 *	spin_lock_irq(&tpg->acl_node_lock); must be held when calling
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}

/*	core_tpg_get_initiator_node_acl():
 *
 *	Locked wrapper around __core_tpg_get_initiator_node_acl().
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	spin_unlock_irq(&tpg->acl_node_lock);

	return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);

/*	core_tpg_add_node_to_devs():
 *
 *	Map each active LUN in the TPG into a demo-mode node ACL's device
 *	list, honoring the fabric's demo-mode write-protect setting.
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg)
{
	int i = 0;
	u32 lun_access = 0;
	struct se_lun *lun;
	struct se_device *dev;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = tpg->tpg_lun_list[i];
		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
			continue;

		spin_unlock(&tpg->tpg_lun_lock);

		dev = lun->lun_se_dev;
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, i.e. LUNs are READ-ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
				lun_access, acl, tpg);
		/*
		 * Check to see if there are any existing persistent reservation
		 * APTPL pre-registrations that need to be enabled for this
		 * dynamic LUN ACL now.
		 */
		core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
						    lun->unpacked_lun);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}

/*      core_set_queue_depth_for_node():
 *
 *      Validate the ACL's queue depth, falling back to a depth of 1 if
 *      the fabric reported zero.
 */
static int core_set_queue_depth_for_node(
	struct se_portal_group *tpg,
	struct se_node_acl *acl)
{
	if (!acl->queue_depth) {
		pr_err("Queue depth for %s Initiator Node: %s is 0,"
			" defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}

	return 0;
}

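/*
 * Free an array of @n individually-allocated elements, followed by the
 * pointer array itself; pairs with array_zalloc() below.
 */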
static void array_free(void *array, int n)
{
	void **a = array;
	int i;

	for (i = 0; i < n; i++)
		kfree(a[i]);
	kfree(a);
}

static void *array_zalloc(int n, size_t size, gfp_t flags)
{
	void **a;
	int i;

	a = kzalloc(n * sizeof(void *), flags);
	if (!a)
		return NULL;
	for (i = 0; i < n; i++) {
		a[i] = kzalloc(size, flags);
		if (!a[i]) {
			array_free(a, n);
			return NULL;
		}
	}
	return a;
}

/*      core_create_device_list_for_node():
 *
 *      Allocate and initialize the per-node array of struct se_dev_entry
 *      used to track this initiator's mapped LUNs.
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;
	int i;

	nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
			sizeof(struct se_dev_entry), GFP_KERNEL);
	if (!nacl->device_list) {
		pr_err("Unable to allocate memory for"
			" struct se_node_acl->device_list\n");
		return -ENOMEM;
	}
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		atomic_set(&deve->ua_count, 0);
		atomic_set(&deve->pr_ref_count, 0);
		spin_lock_init(&deve->ua_lock);
		INIT_LIST_HEAD(&deve->alua_port_list);
		INIT_LIST_HEAD(&deve->ua_list);
	}

	return 0;
}

static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
		const unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
			GFP_KERNEL);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	if (tpg->se_tpg_tfo->tpg_get_default_depth)
		acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	else
		acl->queue_depth = 1;
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0)
		goto out_free_acl;
	if (core_set_queue_depth_for_node(tpg, acl) < 0)
		goto out_free_device_list;

	return acl;

out_free_device_list:
	core_free_device_list_for_node(acl, tpg);
out_free_acl:
	kfree(acl);
	return NULL;
}

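/*
 * Link a freshly allocated ACL into the TPG's acl_node_list and log the
 * addition; called for both dynamic and explicit (configfs) ACLs.
 */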
static void target_add_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

	pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		acl->dynamic_node_acl ? "DYNAMIC" : "",
		acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(),
		acl->initiatorname);
}

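/*
 * Called at fabric session login: return the existing ACL for
 * @initiatorname, or generate a new dynamic ACL when the fabric has
 * demo mode enabled.  Returns NULL if no ACL exists and demo mode is off.
 */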
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return NULL;
	acl->dynamic_node_acl = 1;

	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg);

	target_add_node_acl(acl);
	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);

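/* Spin until all persistent reservation references to @nacl have dropped. */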
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

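/* Delete every active, device-backed LUN from @tpg. */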
void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
	int i;
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = tpg->tpg_lun_list[i];

		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
		    (lun->lun_se_dev == NULL))
			continue;

		spin_unlock(&tpg->tpg_lun_lock);
		core_dev_del_lun(tpg, lun);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);

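/*
 * Called from configfs to create an explicit ACL.  An existing dynamic
 * ACL for @initiatorname is promoted to an explicit one rather than
 * treated as a conflict.
 */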
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			spin_unlock_irq(&tpg->acl_node_lock);
			return acl;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return ERR_PTR(-EEXIST);
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return ERR_PTR(-ENOMEM);

	target_add_node_acl(acl);
	return acl;
}

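/*
 * Remove an explicit ACL from its TPG, shutting down any active sessions
 * for the initiator before the ACL's resources are released.
 */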
void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;
	LIST_HEAD(sess_list);
	struct se_session *sess, *sess_tmp;
	unsigned long flags;
	int rc;

	spin_lock_irq(&tpg->acl_node_lock);
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
	}
	list_del(&acl->acl_list);
	tpg->num_node_acls--;
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	acl->acl_stop = 1;

	list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
				sess_acl_list) {
		if (sess->sess_tearing_down != 0)
			continue;

		target_get_session(sess);
		list_move(&sess->sess_acl_list, &sess_list);
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

	list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
		list_del(&sess->sess_acl_list);

		rc = tpg->se_tpg_tfo->shutdown_session(sess);
		target_put_session(sess);
		if (!rc)
			continue;
		target_put_session(sess);
	}
	target_put_nacl(acl);
	/*
	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
	 * for active fabric session transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_clear_initiator_node_from_tpg(acl, tpg);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	kfree(acl);
}

/*	core_tpg_set_initiator_node_queue_depth():
 *
 *	Change the queue depth for an initiator node ACL, optionally forcing
 *	session reinstatement if a session is already active.
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	unsigned long flags;
	int dynamic_acl = 0;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!acl) {
		pr_err("Access Control List entry for %s Initiator"
			" Node %s does not exist for TPG %hu, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return -ENODEV;
	}
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_irqsave(&tpg->session_lock, flags);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		if (!force) {
			pr_err("Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational.  To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
			spin_unlock_irqrestore(&tpg->session_lock, flags);

			spin_lock_irq(&tpg->acl_node_lock);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			spin_unlock_irq(&tpg->acl_node_lock);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		init_sess = sess;
		break;
	}

	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to add the requested queue depth.
	 *
	 * Finally call tpg->se_tpg_tfo->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_irqrestore(&tpg->session_lock, flags);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already set the session reinstatement
		 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
		 */
		if (init_sess)
			tpg->se_tpg_tfo->close_session(init_sess);

		spin_lock_irq(&tpg->acl_node_lock);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		spin_unlock_irq(&tpg->acl_node_lock);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&tpg->session_lock, flags);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		tpg->se_tpg_tfo->close_session(init_sess);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	spin_lock_irq(&tpg->acl_node_lock);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	spin_unlock_irq(&tpg->acl_node_lock);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
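/*
 * A fabric module would typically invoke the above from a configfs
 * attribute store handler, e.g. (illustrative sketch only, with
 * hypothetical variable names):
 *
 *	rc = core_tpg_set_initiator_node_queue_depth(se_tpg,
 *			initiatorname, new_depth, 1);
 *	if (rc < 0)
 *		return rc;
 */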

/*	core_tpg_set_initiator_node_tag():
 *
 *	Initiator nodeacl tags are not used internally, but may be used by
 *	userspace to emulate aliases or groups.
 *	Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	const char *new_tag)
{
	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
		return -EINVAL;

	if (!strncmp("NULL", new_tag, 4)) {
		acl->acl_tag[0] = '\0';
		return 0;
	}

	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);

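/*
 * percpu_ref release callback for lun->lun_ref: signals the completion
 * that transport_clear_lun_ref() waits on once the final I/O reference
 * to the LUN has been dropped.
 */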
static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

	complete(&lun->lun_ref_comp);
}

static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
	/* Set in core_dev_setup_virtual_lun0() */
	struct se_device *dev = g_lun0_dev;
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	int ret;

	lun->unpacked_lun = 0;
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_acl_list);
	spin_lock_init(&lun->lun_acl_lock);
	spin_lock_init(&lun->lun_sep_lock);
	init_completion(&lun->lun_ref_comp);

	ret = core_tpg_add_lun(se_tpg, lun, lun_access, dev);
	if (ret < 0)
		return ret;

	return 0;
}

int core_tpg_register(
	const struct target_core_fabric_ops *tfo,
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	void *tpg_fabric_ptr,
	int se_tpg_type)
{
	struct se_lun *lun;
	u32 i;

	se_tpg->tpg_lun_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
			sizeof(struct se_lun), GFP_KERNEL);
	if (!se_tpg->tpg_lun_list) {
		pr_err("Unable to allocate struct se_portal_group->"
				"tpg_lun_list\n");
		return -ENOMEM;
	}

	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = se_tpg->tpg_lun_list[i];
		lun->unpacked_lun = i;
		lun->lun_link_magic = SE_LUN_LINK_MAGIC;
		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
		atomic_set(&lun->lun_acl_count, 0);
		init_completion(&lun->lun_shutdown_comp);
		INIT_LIST_HEAD(&lun->lun_acl_list);
		spin_lock_init(&lun->lun_acl_lock);
		spin_lock_init(&lun->lun_sep_lock);
		init_completion(&lun->lun_ref_comp);
	}

	se_tpg->se_tpg_type = se_tpg_type;
	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
	se_tpg->se_tpg_tfo = tfo;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->acl_node_lock);
	spin_lock_init(&se_tpg->session_lock);
	spin_lock_init(&se_tpg->tpg_lun_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
			array_free(se_tpg->tpg_lun_list,
				   TRANSPORT_MAX_LUNS_PER_TPG);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
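/*
 * A fabric module typically calls core_tpg_register() from its configfs
 * TPG creation callback, e.g. (illustrative sketch, fabric-side names
 * are hypothetical):
 *
 *	ret = core_tpg_register(&my_fabric_ops, wwn, &my_tpg->se_tpg,
 *				my_tpg, TRANSPORT_TPG_TYPE_NORMAL);
 *	if (ret < 0)
 *		return ERR_PTR(ret);
 */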

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	struct se_node_acl *nacl, *nacl_tmp;

	pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
		" for endpoint: %s Portal Tag %u\n",
		se_tpg->se_tpg_tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery",
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
		se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	spin_lock_irq(&se_tpg->acl_node_lock);
	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
			acl_list) {
		list_del(&nacl->acl_list);
		se_tpg->num_node_acls--;
		spin_unlock_irq(&se_tpg->acl_node_lock);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		kfree(nacl);

		spin_lock_irq(&se_tpg->acl_node_lock);
	}
	spin_unlock_irq(&se_tpg->acl_node_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
		core_tpg_remove_lun(se_tpg, &se_tpg->tpg_virt_lun0);

	se_tpg->se_tpg_fabric_ptr = NULL;
	array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

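/*
 * Look up the pre-allocated struct se_lun slot for @unpacked_lun,
 * verifying the index is in range and the LUN is not already active.
 */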
struct se_lun *core_tpg_alloc_lun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("TPG Logical Unit Number: %u is already active"
			" on %s Target Portal Group: %u, ignoring request.\n",
			unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-EINVAL);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

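/*
 * Activate a LUN: initialize its percpu I/O reference counter, export
 * the backing device through the fabric port, and mark the LUN active.
 */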
int core_tpg_add_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	struct se_device *dev)
{
	int ret;

	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
			      GFP_KERNEL);
	if (ret < 0)
		return ret;

	ret = core_dev_export(dev, tpg, lun);
	if (ret < 0) {
		percpu_ref_exit(&lun->lun_ref);
		return ret;
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_access = lun_access;
	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}

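/*
 * Reverse of core_tpg_add_lun(): drain outstanding I/O references,
 * unexport the backing device, and return the LUN to FREE status.
 */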
void core_tpg_remove_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_clear_lun_from_tpg(lun, tpg);
	transport_clear_lun_ref(lun);

	core_dev_unexport(lun->lun_se_dev, tpg, lun);

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	spin_unlock(&tpg->tpg_lun_lock);

	percpu_ref_exit(&lun->lun_ref);
}