/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static int sub_api_initialized;

static struct workqueue_struct *target_completion_wq;

static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;

static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev);
static int transport_generic_get_mem(struct se_cmd *cmd);
static int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
static void transport_put_cmd(struct se_cmd *cmd);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session"
				" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration"
				" failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
				" failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
				"cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
			"t10_alua_tg_pt_gp_mem_cache",
			sizeof(struct t10_alua_tg_pt_gp_member),
			__alignof__(struct t10_alua_tg_pt_gp_member),
			0, NULL);
	if (!t10_alua_tg_pt_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"mem_t failed\n");
		goto out_free_tg_pt_gp_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_tg_pt_gp_mem_cache;

	return 0;

out_free_tg_pt_gp_mem_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}
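
/*
 * Usage sketch (illustrative only): callers grab a unique index for each
 * new object they instantiate, as the device setup path below does with
 *
 *	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
 */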

void transport_subsystem_check_init(void)
{
	int ret;

	if (sub_api_initialized)
		return;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = request_module("target_core_stgt");
	if (ret != 0)
		pr_err("Unable to load target_core_stgt\n");

	sub_api_initialized = 1;
	return;
}

struct se_session *transport_init_session(void)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from"
				" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
	kref_init(&se_sess->sess_kref);

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);
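
/*
 * Illustrative fabric-side call sequence (a sketch, not mainline code;
 * the se_tpg/se_nacl/fabric_ptr variables are assumptions of a
 * hypothetical fabric module). Allocation is paired with the
 * registration helpers below:
 *
 *	struct se_session *se_sess = transport_init_session();
 *
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 *	transport_register_session(se_tpg, se_nacl, se_sess, fabric_ptr);
 */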

/*
 * Called with struct se_portal_group->session_lock held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}
		kref_get(&se_nacl->acl_kref);

		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned long flags;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);

void target_release_session(struct kref *kref)
{
	struct se_session *se_sess = container_of(kref,
			struct se_session, sess_kref);
	struct se_portal_group *se_tpg = se_sess->se_tpg;

	se_tpg->se_tpg_tfo->close_session(se_sess);
}

void target_get_session(struct se_session *se_sess)
{
	kref_get(&se_sess->sess_kref);
}
EXPORT_SYMBOL(target_get_session);

void target_put_session(struct se_session *se_sess)
{
	struct se_portal_group *tpg = se_sess->se_tpg;

	if (tpg->se_tpg_tfo->put_session != NULL) {
		tpg->se_tpg_tfo->put_session(se_sess);
		return;
	}
	kref_put(&se_sess->sess_kref, target_release_session);
}
EXPORT_SYMBOL(target_put_session);

static void target_complete_nacl(struct kref *kref)
{
	struct se_node_acl *nacl = container_of(kref,
				struct se_node_acl, acl_kref);

	complete(&nacl->acl_free_comp);
}

void target_put_nacl(struct se_node_acl *nacl)
{
	kref_put(&nacl->acl_kref, target_complete_nacl);
}

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		if (se_nacl->acl_stop == 0)
			list_del(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	struct target_core_fabric_ops *se_tfo;
	struct se_node_acl *se_nacl;
	unsigned long flags;
	bool comp_nacl = true;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}
	se_tfo = se_tpg->se_tpg_tfo;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	/*
	 * Determine if we need to do extra work for this initiator node's
	 * struct se_node_acl if it had been previously dynamically generated.
	 */
	se_nacl = se_sess->se_node_acl;

	spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
	if (se_nacl && se_nacl->dynamic_node_acl) {
		if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
			list_del(&se_nacl->acl_list);
			se_tpg->num_node_acls--;
			spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
			core_tpg_wait_for_nacl_pr_ref(se_nacl);
			core_free_device_list_for_node(se_nacl, se_tpg);
			se_tfo->tpg_release_fabric_acl(se_tpg, se_nacl);

			comp_nacl = false;
			spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
		}
	}
	spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);

	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		se_tpg->se_tpg_tfo->get_fabric_name());
	/*
	 * If the last kref is dropping now for an explicit NodeACL, wake the
	 * sleeping ->acl_free_comp caller in the configfs se_node_acl->acl_group
	 * removal context.
	 */
	if (se_nacl && comp_nacl == true)
		target_put_nacl(se_nacl);

	transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);

/*
 * Called with cmd->t_state_lock held.
 */
static void target_remove_from_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	if (!dev)
		return;

	if (cmd->transport_state & CMD_T_BUSY)
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (cmd->state_active) {
		list_del(&cmd->state_list);
		cmd->state_active = false;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	/*
	 * Determine if the IOCTL context caller is requesting the stopping of
	 * this command for LUN shutdown purposes.
	 */
	if (cmd->transport_state & CMD_T_LUN_STOP) {
		pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
			__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));

		cmd->transport_state &= ~CMD_T_ACTIVE;
		if (remove_from_lists)
			target_remove_from_state_list(cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->transport_lun_stop_comp);
		return 1;
	}

	if (remove_from_lists) {
		target_remove_from_state_list(cmd);

		/*
		 * Clear struct se_cmd->se_lun before the handoff to FE.
		 */
		cmd->se_lun = NULL;
	}

	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
			__func__, __LINE__,
			cmd->se_tfo->get_task_tag(cmd));

		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->t_transport_stop_comp);
		return 1;
	}

	cmd->transport_state &= ~CMD_T_ACTIVE;
	if (remove_from_lists) {
		/*
		 * Some fabric modules like tcm_loop can release
		 * their internally allocated I/O reference and
		 * struct se_cmd now.
		 *
		 * Fabric modules are expected to return '1' here if the
		 * se_cmd being passed is released at this point,
		 * or zero if not being released.
		 */
		if (cmd->se_tfo->check_stop_free != NULL) {
			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
			return cmd->se_tfo->check_stop_free(cmd);
		}
	}

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	return 0;
}

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	return transport_cmd_check_stop(cmd, true);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;
	unsigned long flags;

	if (!lun)
		return;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		target_remove_from_state_list(cmd);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
	if (!list_empty(&cmd->se_lun_node))
		list_del_init(&cmd->se_lun_node);
	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
}

void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
		transport_lun_remove_cmd(cmd);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
	if (remove)
		transport_put_cmd(cmd);
}

static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd);
}

void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
	struct se_device *dev = cmd->se_dev;
	int success = scsi_status == GOOD;
	unsigned long flags;

	cmd->scsi_status = scsi_status;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state &= ~CMD_T_BUSY;

	if (dev && dev->transport->transport_complete) {
		if (dev->transport->transport_complete(cmd,
				cmd->t_data_sg) != 0) {
			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
			success = 1;
		}
	}

	/*
	 * See if we are waiting to complete for an exception condition.
	 */
	if (cmd->transport_state & CMD_T_REQUEST_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->task_stop_comp);
		return;
	}

	if (!success)
		cmd->transport_state |= CMD_T_FAILED;

	/*
	 * Check for the case where an explicit ABORT_TASK has been received
	 * and transport_wait_for_tasks() will be waiting for completion.
	 */
	if (cmd->transport_state & CMD_T_ABORTED &&
	    cmd->transport_state & CMD_T_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->t_transport_stop_comp);
		return;
	} else if (cmd->transport_state & CMD_T_FAILED) {
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		INIT_WORK(&cmd->work, target_complete_failure_work);
	} else {
		INIT_WORK(&cmd->work, target_complete_ok_work);
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);

static void target_add_to_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (!cmd->state_active) {
		list_add_tail(&cmd->state_list, &dev->state_list);
		cmd->state_active = true;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 */
static void transport_write_pending_qf(struct se_cmd *cmd);
static void transport_complete_qf(struct se_cmd *cmd);

static void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
					qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec(&dev->dev_qf_count);
		smp_mb__after_atomic_dec();

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			: "UNKNOWN");

		if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
			transport_write_pending_qf(cmd);
		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
			transport_complete_qf(cmd);
	}
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	switch (dev->dev_status) {
	case TRANSPORT_DEVICE_ACTIVATED:
		*bl += sprintf(b + *bl, "ACTIVATED");
		break;
	case TRANSPORT_DEVICE_DEACTIVATED:
		*bl += sprintf(b + *bl, "DEACTIVATED");
		break;
	case TRANSPORT_DEVICE_SHUTDOWN:
		*bl += sprintf(b + *bl, "SHUTDOWN");
		break;
	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
		*bl += sprintf(b + *bl, "OFFLINE");
		break;
	default:
		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
		break;
	}

	*bl += sprintf(b + *bl, "  Max Queue Depth: %d", dev->queue_depth);
	*bl += sprintf(b + *bl, "  SectorSize: %u  HwMaxSectors: %u\n",
		dev->se_sub_dev->se_dev_attrib.block_size,
		dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
	*bl += sprintf(b + *bl, "        ");
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access"
				" Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport"
				" Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set..
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	 if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
				hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
				hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);
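
/*
 * Worked example (sketch; the bytes below are illustrative, not taken
 * from a real device): for an INQUIRY EVPD 0x83 identification
 * descriptor beginning 0x01 0x93 0x00 0x08, the helpers above decode
 *
 *	code set	= page_83[0] & 0x0f = 0x01 (Binary)
 *	PIV		= page_83[1] & 0x80 = set
 *	association	= page_83[1] & 0x30 = 0x10 (target port)
 *	ident type	= page_83[1] & 0x0f = 0x03 (NAA)
 *	ident length	= page_83[3]       = 8 bytes starting at offset 4
 */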

static void core_setup_task_attr_emulation(struct se_device *dev)
{
	/*
	 * If this device is from Target_Core_Mod/pSCSI, disable the
	 * SAM Task Attribute emulation.
	 *
	 * This is currently not available in upstream Linux/SCSI Target
	 * mode code, and is assumed to be disabled while using TCM/pSCSI.
	 */
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
		return;
	}

	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
	pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
		" device\n", dev->transport->name,
		dev->transport->get_device_rev(dev));
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
	pr_debug("                 ANSI SCSI revision: %02x\n",
				dev->transport->get_device_rev(dev));
}

struct se_device *transport_add_device_to_core_hba(
	struct se_hba *hba,
	struct se_subsystem_api *transport,
	struct se_subsystem_dev *se_dev,
	u32 device_flags,
	void *transport_dev,
	struct se_dev_limits *dev_limits,
	const char *inquiry_prod,
	const char *inquiry_rev)
{
	int force_pt;
	struct se_device  *dev;

	dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
	if (!dev) {
		pr_err("Unable to allocate memory for se_dev_t\n");
		return NULL;
	}

	dev->dev_flags		= device_flags;
	dev->dev_status		|= TRANSPORT_DEVICE_DEACTIVATED;
	dev->dev_ptr		= transport_dev;
	dev->se_hba		= hba;
	dev->se_sub_dev		= se_dev;
	dev->transport		= transport;
	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->dev_status_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	atomic_set(&dev->dev_ordered_id, 0);

	se_dev_set_default_attribs(dev, dev_limits);

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();
	spin_lock_init(&dev->stats_lock);

	spin_lock(&hba->device_lock);
	list_add_tail(&dev->dev_list, &hba->hba_dev_list);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);
	/*
	 * Setup the SAM Task Attribute emulation for struct se_device
	 */
	core_setup_task_attr_emulation(dev);
	/*
	 * Force PR and ALUA passthrough emulation with internal object use.
	 */
	force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
	/*
	 * Setup the Reservations infrastructure for struct se_device
	 */
	core_setup_reservations(dev, force_pt);
	/*
	 * Setup the Asymmetric Logical Unit Assignment for struct se_device
	 */
	if (core_setup_alua(dev, force_pt) < 0)
		goto err_dev_list;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
			dev->transport->name);
		goto err_dev_list;
	}
	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 * This is required so that transport_get_inquiry() copies these
	 * originals once back into DEV_T10_WWN(dev) for the virtual device
	 * setup.
	 */
	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (!inquiry_prod || !inquiry_rev) {
			pr_err("All non TCM/pSCSI plugins require"
				" INQUIRY consts\n");
			goto err_wq;
		}

		strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
		strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
	}
	scsi_dump_inquiry(dev);

	return dev;

err_wq:
	destroy_workqueue(dev->tmr_wq);
err_dev_list:
	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	se_release_vpd_for_dev(dev);

	kfree(dev);

	return NULL;
}
EXPORT_SYMBOL(transport_add_device_to_core_hba);

int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
	struct se_device *dev = cmd->se_dev;

	if (cmd->unknown_data_length) {
		cmd->data_length = size;
	} else if (size != cmd->data_length) {
		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
				cmd->data_length, size, cmd->t_task_cdb[0]);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			pr_err("Rejecting underflow/overflow"
					" WRITE data\n");
			goto out_invalid_cdb_field;
		}
		/*
		 * Reject READ_* or WRITE_* with overflow/underflow for
		 * type SCF_SCSI_DATA_CDB.
		 */
		if (dev->se_sub_dev->se_dev_attrib.block_size != 512)  {
			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
				" CDB on non 512-byte sector setup subsystem"
				" plugin: %s\n", dev->transport->name);
			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
			goto out_invalid_cdb_field;
		}

		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
		}
		cmd->data_length = size;
	}

	return 0;

out_invalid_cdb_field:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
	return -EINVAL;
}
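
/*
 * Worked example (illustrative numbers): for a READ on a 512-byte block
 * device, if the CDB encodes a 4096 byte transfer (size) while the
 * fabric header promised cmd->data_length of 2048, the check above sets
 * SCF_OVERFLOW_BIT with residual_count 2048 and continues with the
 * CDB-derived length; the reverse mismatch sets SCF_UNDERFLOW_BIT.
 */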

/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
	struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer)
{
	INIT_LIST_HEAD(&cmd->se_lun_node);
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_cmd_list);
	INIT_LIST_HEAD(&cmd->state_list);
	init_completion(&cmd->transport_lun_fe_stop_comp);
	init_completion(&cmd->transport_lun_stop_comp);
	init_completion(&cmd->t_transport_stop_comp);
	init_completion(&cmd->cmd_wait_comp);
	init_completion(&cmd->task_stop_comp);
	spin_lock_init(&cmd->t_state_lock);
	cmd->transport_state = CMD_T_DEV_ACTIVE;

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;

	cmd->state_active = false;
}
EXPORT_SYMBOL(transport_init_se_cmd);

static int transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 0;

	if (cmd->sam_task_attr == MSG_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA"
			" emulation is not supported\n");
		return -EINVAL;
	}
	/*
	 * Used to determine when ORDERED commands should go from
	 * Dormant to Active status.
	 */
	cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
	smp_mb__after_atomic_inc();
	pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
			cmd->se_ordered_id, cmd->sam_task_attr,
			cmd->se_dev->transport->name);
	return 0;
}

/*	target_setup_cmd_from_cdb():
 *
 *	Called from fabric RX Thread.
 */
int target_setup_cmd_from_cdb(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
	u32 pr_reg_type = 0;
	u8 alua_ascq = 0;
	unsigned long flags;
	int ret;

	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now..  Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
						GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb"
				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(cmd->__t_task_cdb));
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason =
					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
			return -ENOMEM;
		}
	} else
		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Copy the original CDB into cmd->t_task_cdb.
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));

	/*
	 * Check for an existing UNIT ATTENTION condition
	 */
	if (core_scsi3_ua_check(cmd, cdb) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
		return -EINVAL;
	}

	ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
	if (ret != 0) {
		/*
		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
		 * The ALUA additional sense code qualifier (ASCQ) is determined
		 * by the ALUA primary or secondary access state..
		 */
		if (ret > 0) {
			pr_debug("[%s]: ALUA TG Port not available, "
				"SenseKey: NOT_READY, ASC/ASCQ: "
				"0x04/0x%02x\n",
				cmd->se_tfo->get_fabric_name(), alua_ascq);

			transport_set_sense_codes(cmd, 0x04, alua_ascq);
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
			return -EINVAL;
		}
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}

	/*
	 * Check status for SPC-3 Persistent Reservations
	 */
	if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type)) {
		if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
					cmd, cdb, pr_reg_type) != 0) {
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
			cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
			cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
			return -EBUSY;
		}
		/*
		 * This means the CDB is allowed for the SCSI Initiator port
		 * when said port is *NOT* holding the legacy SPC-2 or
		 * SPC-3 Persistent Reservation.
		 */
	}

	ret = cmd->se_dev->transport->parse_cdb(cmd);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * Check for SAM Task Attribute Emulation
	 */
	if (transport_check_alloc_task_attr(cmd) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}
	spin_lock(&cmd->se_lun->lun_sep_lock);
	if (cmd->se_lun->lun_sep)
		cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
	spin_unlock(&cmd->se_lun->lun_sep_lock);
	return 0;
}
EXPORT_SYMBOL(target_setup_cmd_from_cdb);

/*
 * Used by fabric module frontends to queue tasks directly.
 * May only be used from process context.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	int ret;

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}
	if (in_interrupt()) {
		dump_stack();
		pr_err("transport_generic_handle_cdb cannot be called"
				" from interrupt context\n");
		return -EINVAL;
	}
	/*
	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
	 * outstanding descriptors are handled correctly during shutdown via
	 * transport_wait_for_tasks()
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	cmd->transport_state |= CMD_T_ACTIVE;

	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary..
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret < 0)
		transport_generic_request_failure(cmd);

	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);

/**
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
 * Returns non zero to signal active I/O shutdown failure.  All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 **/
int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{
	struct se_portal_group *se_tpg;
	int rc;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
	BUG_ON(in_interrupt());
	/*
	 * Initialize se_cmd for target operation.  From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
				data_length, data_dir, task_attr, sense);
	if (flags & TARGET_SCF_UNKNOWN_SIZE)
		se_cmd->unknown_data_length = 1;
	/*
	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
	 * se_sess->sess_cmd_list.  A second kref_get here is necessary
	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
	 */
	rc = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
	if (rc)
		return rc;
	/*
	 * Signal bidirectional data payloads to target-core
	 */
	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;
	/*
	 * Locate se_lun pointer and attach it to struct se_cmd
	 */
	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
		transport_send_check_condition_and_sense(se_cmd,
				se_cmd->scsi_sense_reason, 0);
		target_put_sess_cmd(se_sess, se_cmd);
		return 0;
	}

	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
	if (rc != 0) {
		transport_generic_request_failure(se_cmd);
		return 0;
	}

	/*
	 * Check if we need to delay processing because of ALUA
	 * Active/NonOptimized primary access state..
	 */
	core_alua_check_nonop_delay(se_cmd);

	transport_handle_cdb_direct(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_cmd);
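
/*
 * Illustrative fabric RX path call (sketch only; ioreq/conn are
 * assumptions of a hypothetical fabric driver, not part of this API):
 *
 *	target_submit_cmd(&ioreq->se_cmd, conn->se_sess, ioreq->cdb,
 *			  ioreq->sense_buf, ioreq->unpacked_lun,
 *			  ioreq->data_len, MSG_SIMPLE_TAG,
 *			  DMA_FROM_DEVICE, 0);
 */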

static void target_complete_tmr_failure(struct work_struct *work)
{
	struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);

	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
	se_cmd->se_tfo->queue_tm_rsp(se_cmd);
	transport_generic_free_cmd(se_cmd, 0);
}

/**
 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
 *                     for TMR CDBs
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @fabric_tmr_ptr: fabric context for TMR req
 * @tm_type: Type of TM request
 * @gfp: gfp type for caller
 * @tag: referenced task tag for TMR_ABORT_TASK
 * @flags: submit cmd flags
 *
 * Callable from all contexts.
 **/

int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u32 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t gfp, unsigned int tag, int flags)
{
	struct se_portal_group *se_tpg;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);

	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			      0, DMA_NONE, MSG_SIMPLE_TAG, sense);
	/*
	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
	 * allocation failure.
	 */
	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
	if (ret < 0)
		return -ENOMEM;

	if (tm_type == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = tag;

	/* See target_submit_cmd for commentary */
	ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
	if (ret) {
		core_tmr_release_req(se_cmd->se_tmr_req);
		return ret;
	}

	ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
	if (ret) {
		/*
		 * For callback during failure handling, push this work off
		 * to process context with TMR_LUN_DOES_NOT_EXIST status.
		 */
		INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
		schedule_work(&se_cmd->work);
		return 0;
	}
	transport_generic_handle_tmr(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_tmr);
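
/*
 * Illustrative ABORT_TASK submission (sketch only; tmr and conn are
 * assumptions of a hypothetical fabric caller):
 *
 *	target_submit_tmr(&tmr->se_cmd, conn->se_sess, tmr->sense_buf,
 *			  tmr->unpacked_lun, tmr, TMR_ABORT_TASK,
 *			  GFP_KERNEL, tmr->ref_task_tag, 0);
 */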

/*
 * If the cmd is active, request it to be stopped and sleep until it
 * has completed.
 */
bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
{
	bool was_active = false;

	if (cmd->transport_state & CMD_T_BUSY) {
		cmd->transport_state |= CMD_T_REQUEST_STOP;
		spin_unlock_irqrestore(&cmd->t_state_lock, *flags);

		pr_debug("cmd %p waiting to complete\n", cmd);
		wait_for_completion(&cmd->task_stop_comp);
		pr_debug("cmd %p stopped successfully\n", cmd);

		spin_lock_irqsave(&cmd->t_state_lock, *flags);
		cmd->transport_state &= ~CMD_T_REQUEST_STOP;
		cmd->transport_state &= ~CMD_T_BUSY;
		was_active = true;
	}

	return was_active;
}

/*
 * Handle SAM-esque emulation for generic transport request failures.
 */
void transport_generic_request_failure(struct se_cmd *cmd)
{
	int ret = 0;

	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
		cmd->t_task_cdb[0]);
	pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
		cmd->se_tfo->get_cmd_state(cmd),
		cmd->t_state, cmd->scsi_sense_reason);
	pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
		(cmd->transport_state & CMD_T_ACTIVE) != 0,
		(cmd->transport_state & CMD_T_STOP) != 0,
		(cmd->transport_state & CMD_T_SENT) != 0);

	/*
	 * For SAM Task Attribute emulation for failed struct se_cmd
	 */
	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);

	switch (cmd->scsi_sense_reason) {
	case TCM_NON_EXISTENT_LUN:
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_INVALID_CDB_FIELD:
	case TCM_INVALID_PARAMETER_LIST:
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	case TCM_UNKNOWN_MODE_PAGE:
	case TCM_WRITE_PROTECTED:
	case TCM_ADDRESS_OUT_OF_RANGE:
	case TCM_CHECK_CONDITION_ABORT_CMD:
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
	case TCM_CHECK_CONDITION_NOT_READY:
		break;
	case TCM_RESERVATION_CONFLICT:
		/*
		 * No SENSE Data payload for this case, set SCSI Status
		 * and queue the response to $FABRIC_MOD.
		 *
		 * Uses linux/include/scsi/scsi.h SAM status codes defs
		 */
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
		/*
		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
		 * CONFLICT STATUS.
		 *
		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
		 */
		if (cmd->se_sess &&
		    cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
			core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
				cmd->orig_fe_lun, 0x2C,
				ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);

		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		goto check_stop;
	default:
		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
			cmd->t_task_cdb[0], cmd->scsi_sense_reason);
		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	}
	ret = transport_send_check_condition_and_sense(cmd,
			cmd->scsi_sense_reason, 0);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;
check_stop:
	transport_lun_remove_cmd(cmd);
	if (!transport_cmd_check_stop_to_fabric(cmd))
		;
	return;

queue_full:
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}
EXPORT_SYMBOL(transport_generic_request_failure);

static void __target_execute_cmd(struct se_cmd *cmd)
{
	int error = 0;

	spin_lock_irq(&cmd->t_state_lock);
	cmd->transport_state |= (CMD_T_BUSY|CMD_T_SENT);
	spin_unlock_irq(&cmd->t_state_lock);

	if (cmd->execute_cmd)
		error = cmd->execute_cmd(cmd);

	if (error) {
		spin_lock_irq(&cmd->t_state_lock);
		cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
		spin_unlock_irq(&cmd->t_state_lock);

		transport_generic_request_failure(cmd);
	}
}

void target_execute_cmd(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * If the received CDB has already been aborted, stop processing it here.
	 */
	if (transport_check_aborted_status(cmd, 1))
		return;

	/*
	 * Determine if the IOCTL context caller is requesting the stopping of this
	 * command for LUN shutdown purposes.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if (cmd->transport_state & CMD_T_LUN_STOP) {
		pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
			__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));

		cmd->transport_state &= ~CMD_T_ACTIVE;
		spin_unlock_irq(&cmd->t_state_lock);
		complete(&cmd->transport_lun_stop_comp);
		return;
	}
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
			__func__, __LINE__,
			cmd->se_tfo->get_task_tag(cmd));

		spin_unlock_irq(&cmd->t_state_lock);
		complete(&cmd->t_transport_stop_comp);
		return;
	}

	cmd->t_state = TRANSPORT_PROCESSING;
	spin_unlock_irq(&cmd->t_state_lock);

	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		goto execute;

	/*
	 * Check for the existence of HEAD_OF_QUEUE, and if true, allow the
	 * passed struct se_cmd to be moved to the front of the list.
	 */
	switch (cmd->sam_task_attr) {
	case MSG_HEAD_TAG:
		pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, "
			 "se_ordered_id: %u\n",
			 cmd->t_task_cdb[0], cmd->se_ordered_id);
		goto execute;
	case MSG_ORDERED_TAG:
		atomic_inc(&dev->dev_ordered_sync);
1776 1777
		smp_mb__after_atomic_inc();

1778 1779 1780 1781
		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
			 " se_ordered_id: %u\n",
			 cmd->t_task_cdb[0], cmd->se_ordered_id);

1782
		/*
1783 1784
		 * Execute an ORDERED command if no other older commands
		 * exist that need to be completed first.
1785
		 */
1786 1787 1788 1789
		if (!atomic_read(&dev->simple_cmds))
			goto execute;
		break;
	default:
1790 1791 1792
		/*
		 * For SIMPLE and UNTAGGED Task Attribute commands
		 */
1793
		atomic_inc(&dev->simple_cmds);
1794
		smp_mb__after_atomic_inc();
1795
		break;
1796
	}
1797 1798 1799 1800 1801

	if (atomic_read(&dev->dev_ordered_sync) != 0) {
		spin_lock(&dev->delayed_cmd_lock);
		list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
		spin_unlock(&dev->delayed_cmd_lock);
1802

1803
		pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
1804
			" delayed CMD list, se_ordered_id: %u\n",
1805
			cmd->t_task_cdb[0], cmd->sam_task_attr,
1806
			cmd->se_ordered_id);
1807
		return;
1808 1809
	}

1810
execute:
1811
	/*
1812
	 * Otherwise, no ORDERED task attributes exist..
1813
	 */
1814
	__target_execute_cmd(cmd);
1815
}
1816
EXPORT_SYMBOL(target_execute_cmd);
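
/*
 * Editor's note: a minimal sketch (not part of this file) of the point at
 * which a fabric driver typically calls target_execute_cmd() -- after the
 * data phase of a WRITE has completed.  The "my_fabric" name and its data
 * path are assumptions for illustration only:
 *
 *	static void my_fabric_handle_write_data_done(struct se_cmd *se_cmd)
 *	{
 *		// All WRITE payload now sits in se_cmd's data SGLs, so
 *		// hand the command to the core for backend execution:
 *		target_execute_cmd(se_cmd);
 *	}
 */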

/*
 * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
 */
static int transport_get_sense_data(struct se_cmd *cmd)
{
	unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;
	u32 offset = 0;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}

	if (!(cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
		goto out;

	if (!dev->transport->get_sense_buffer) {
		pr_err("dev->transport->get_sense_buffer is NULL\n");
		goto out;
	}

	sense_buffer = dev->transport->get_sense_buffer(cmd);
	if (!sense_buffer) {
		pr_err("ITT 0x%08x cmd %p: Unable to locate"
			" sense buffer for task with sense\n",
			cmd->se_tfo->get_task_tag(cmd), cmd);
		goto out;
	}

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER);

	memcpy(&buffer[offset], sense_buffer, TRANSPORT_SENSE_BUFFER);

	/* Automatically padded */
	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;

	pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x and sense\n",
		dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
	return 0;

out:
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	return -1;
}

/*
 * Process all commands up to the last received ORDERED task attribute which
 * requires another blocking boundary
 */
static void target_restart_delayed_cmds(struct se_device *dev)
{
	for (;;) {
		struct se_cmd *cmd;

		spin_lock(&dev->delayed_cmd_lock);
		if (list_empty(&dev->delayed_cmd_list)) {
			spin_unlock(&dev->delayed_cmd_lock);
			break;
		}

		cmd = list_entry(dev->delayed_cmd_list.next,
				 struct se_cmd, se_delayed_node);
		list_del(&cmd->se_delayed_node);
		spin_unlock(&dev->delayed_cmd_lock);

		__target_execute_cmd(cmd);

		if (cmd->sam_task_attr == MSG_ORDERED_TAG)
			break;
	}
}

/*
 * Called from I/O completion to determine which dormant/delayed
 * and ordered cmds need to have their tasks added to the execution queue.
 */
static void transport_complete_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
		atomic_dec(&dev->simple_cmds);
		smp_mb__after_atomic_dec();
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
			cmd->se_ordered_id);
	} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for"
			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
			cmd->se_ordered_id);
	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
		atomic_dec(&dev->dev_ordered_sync);
		smp_mb__after_atomic_dec();

		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
			" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
	}

	target_restart_delayed_cmds(dev);
}

static void transport_complete_qf(struct se_cmd *cmd)
{
	int ret = 0;

	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);

	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret)
			goto out;
	}

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		ret = cmd->se_tfo->queue_data_in(cmd);
		break;
	case DMA_TO_DEVICE:
		if (cmd->t_bidi_data_sg) {
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret < 0)
				break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		ret = cmd->se_tfo->queue_status(cmd);
		break;
	default:
		break;
	}

out:
	if (ret < 0) {
		transport_handle_queue_full(cmd, cmd->se_dev);
		return;
	}
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
}

static void transport_handle_queue_full(
	struct se_cmd *cmd,
	struct se_device *dev)
{
	spin_lock_irq(&dev->qf_cmd_lock);
	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
	atomic_inc(&dev->dev_qf_count);
	smp_mb__after_atomic_inc();
	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);

	schedule_work(&cmd->se_dev->qf_work_queue);
}

static void target_complete_ok_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	int reason = 0, ret;

	/*
	 * Check if we need to move delayed/dormant tasks from cmds on the
	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
	 * Attribute.
	 */
	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);
	/*
	 * Check to schedule QUEUE_FULL work, or execute an existing
	 * cmd->transport_qf_callback()
	 */
	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
		schedule_work(&cmd->se_dev->qf_work_queue);

	/*
	 * Check if we need to retrieve a sense buffer from
	 * the struct se_cmd in question.
	 */
	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		if (transport_get_sense_data(cmd) < 0)
			reason = TCM_NON_EXISTENT_LUN;

		if (cmd->scsi_status) {
			ret = transport_send_check_condition_and_sense(
					cmd, reason, 1);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop_to_fabric(cmd);
			return;
		}
	}
	/*
	 * Check for a callback, used amongst other things by
	 * XDWRITE_READ_10 emulation.
	 */
	if (cmd->transport_complete_callback)
		cmd->transport_complete_callback(cmd);

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		spin_lock(&cmd->se_lun->lun_sep_lock);
		if (cmd->se_lun->lun_sep) {
			cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
					cmd->data_length;
		}
		spin_unlock(&cmd->se_lun->lun_sep_lock);

		ret = cmd->se_tfo->queue_data_in(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	case DMA_TO_DEVICE:
		spin_lock(&cmd->se_lun->lun_sep_lock);
		if (cmd->se_lun->lun_sep) {
			cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
				cmd->data_length;
		}
		spin_unlock(&cmd->se_lun->lun_sep_lock);
		/*
		 * Check if we need to send READ payload for BIDI-COMMAND
		 */
		if (cmd->t_bidi_data_sg) {
			spin_lock(&cmd->se_lun->lun_sep_lock);
			if (cmd->se_lun->lun_sep) {
				cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
					cmd->data_length;
			}
			spin_unlock(&cmd->se_lun->lun_sep_lock);
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;
			break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	default:
		break;
	}

	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
		" data_direction: %d\n", cmd, cmd->data_direction);
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}

static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, nents, count)
		__free_page(sg_page(sg));

	kfree(sgl);
}

static inline void transport_free_pages(struct se_cmd *cmd)
{
	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
		return;

	transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
	cmd->t_data_sg = NULL;
	cmd->t_data_nents = 0;

	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
	cmd->t_bidi_data_sg = NULL;
	cmd->t_bidi_data_nents = 0;
}

/**
 * transport_release_cmd - free a command
 * @cmd:       command to free
 *
 * This routine unconditionally frees a command, and reference counting
 * or list removal must be done in the caller.
 */
static void transport_release_cmd(struct se_cmd *cmd)
{
	BUG_ON(!cmd->se_tfo);

	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
		core_tmr_release_req(cmd->se_tmr_req);
	if (cmd->t_task_cdb != cmd->__t_task_cdb)
		kfree(cmd->t_task_cdb);
	/*
	 * If this cmd has been setup with target_get_sess_cmd(), drop
	 * the kref and call ->release_cmd() in kref callback.
	 */
	if (cmd->check_release != 0) {
		target_put_sess_cmd(cmd->se_sess, cmd);
		return;
	}
	cmd->se_tfo->release_cmd(cmd);
}

/**
 * transport_put_cmd - release a reference to a command
 * @cmd:       command to release
 *
 * This routine releases our reference to the command and frees it if possible.
 */
static void transport_put_cmd(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (atomic_read(&cmd->t_fe_count)) {
		if (!atomic_dec_and_test(&cmd->t_fe_count))
			goto out_busy;
	}

	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		target_remove_from_state_list(cmd);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	transport_free_pages(cmd);
	transport_release_cmd(cmd);
	return;
out_busy:
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

/*
 * transport_generic_map_mem_to_cmd - Use fabric-allocated pages instead of
 * allocating in the core.
 * @cmd:  Associated se_cmd descriptor
 * @sgl:  SGL style memory for TCM WRITE / READ
 * @sgl_count: Number of SGL elements
 * @sgl_bidi: SGL style memory for TCM BIDI READ
 * @sgl_bidi_count: Number of BIDI READ SGL elements
 *
 * Return: nonzero if cmd was rejected, due to -ENOMEM or improper usage
 * of parameters.
 */
int transport_generic_map_mem_to_cmd(
	struct se_cmd *cmd,
	struct scatterlist *sgl,
	u32 sgl_count,
	struct scatterlist *sgl_bidi,
	u32 sgl_bidi_count)
{
	if (!sgl || !sgl_count)
		return 0;

	/*
	 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
	 * scatterlists already have been set to follow what the fabric
	 * passes for the original expected data transfer length.
	 */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		pr_warn("Rejecting SCSI DATA overflow for fabric using"
			" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}

	cmd->t_data_sg = sgl;
	cmd->t_data_nents = sgl_count;

	if (sgl_bidi && sgl_bidi_count) {
		cmd->t_bidi_data_sg = sgl_bidi;
		cmd->t_bidi_data_nents = sgl_bidi_count;
	}
	cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	return 0;
}
EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
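
/*
 * Editor's note: a minimal usage sketch, assuming a fabric (tcm_loop style)
 * that already owns scatterlists for the payload; "req" and its fields are
 * hypothetical:
 *
 *	int rc = transport_generic_map_mem_to_cmd(&req->se_cmd, req->sgl,
 *						  req->sgl_count, NULL, 0);
 *	if (!rc)
 *		rc = transport_generic_new_cmd(&req->se_cmd);
 *	// With SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC set above, the core skips
 *	// its own page allocation in transport_generic_get_mem().
 */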

void *transport_kmap_data_sg(struct se_cmd *cmd)
{
	struct scatterlist *sg = cmd->t_data_sg;
	struct page **pages;
	int i;

	BUG_ON(!sg);
	/*
	 * We need to take into account a possible offset here for fabrics like
	 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
	 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
	 */
	if (!cmd->t_data_nents)
		return NULL;
	else if (cmd->t_data_nents == 1)
		return kmap(sg_page(sg)) + sg->offset;

	/* >1 page. use vmap */
	pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
	if (!pages)
		return NULL;

	/* convert sg[] to pages[] */
	for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
		pages[i] = sg_page(sg);
	}

	cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
	kfree(pages);
	if (!cmd->t_data_vmap)
		return NULL;

	return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
}
EXPORT_SYMBOL(transport_kmap_data_sg);

void transport_kunmap_data_sg(struct se_cmd *cmd)
{
	if (!cmd->t_data_nents) {
		return;
	} else if (cmd->t_data_nents == 1) {
		kunmap(sg_page(cmd->t_data_sg));
		return;
	}

	vunmap(cmd->t_data_vmap);
	cmd->t_data_vmap = NULL;
}
EXPORT_SYMBOL(transport_kunmap_data_sg);
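
/*
 * Editor's note: the two helpers above must be paired.  A minimal sketch of
 * emulation code filling a response payload through a linear mapping; the
 * surrounding function is hypothetical:
 *
 *	unsigned char *buf = transport_kmap_data_sg(cmd);
 *	if (!buf)
 *		return -ENOMEM;
 *	memset(buf, 0, cmd->data_length);
 *	// ... build the response payload in buf ...
 *	transport_kunmap_data_sg(cmd);
 */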

static int
transport_generic_get_mem(struct se_cmd *cmd)
{
	u32 length = cmd->data_length;
	unsigned int nents;
	struct page *page;
	gfp_t zero_flag;
	int i = 0;

	nents = DIV_ROUND_UP(length, PAGE_SIZE);
	cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
	if (!cmd->t_data_sg)
		return -ENOMEM;

	cmd->t_data_nents = nents;
	sg_init_table(cmd->t_data_sg, nents);

	zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_CDB ? 0 : __GFP_ZERO;

	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);
		page = alloc_page(GFP_KERNEL | zero_flag);
		if (!page)
			goto out;

		sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}
	return 0;

out:
	while (i > 0) {
		i--;
		__free_page(sg_page(&cmd->t_data_sg[i]));
	}
	kfree(cmd->t_data_sg);
	cmd->t_data_sg = NULL;
	return -ENOMEM;
}

/*
 * Allocate any required resources to execute the command.  For writes we
 * might not have the payload yet, so notify the fabric via a call to
 * ->write_pending instead. Otherwise place it on the execution queue.
 */
int transport_generic_new_cmd(struct se_cmd *cmd)
{
	int ret = 0;

	/*
	 * Determine if the TCM fabric module has already allocated physical
	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
	 * beforehand.
	 */
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
	    cmd->data_length) {
		ret = transport_generic_get_mem(cmd);
		if (ret < 0)
			goto out_fail;
	}
	/*
	 * If this command doesn't have any payload and we don't have to call
	 * into the fabric for data transfers, go ahead and complete it right
	 * away.
	 */
	if (!cmd->data_length) {
		spin_lock_irq(&cmd->t_state_lock);
		cmd->t_state = TRANSPORT_COMPLETE;
		cmd->transport_state |= CMD_T_ACTIVE;
		spin_unlock_irq(&cmd->t_state_lock);

		if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
			u8 ua_asc = 0, ua_ascq = 0;

			core_scsi3_ua_clear_for_request_sense(cmd,
					&ua_asc, &ua_ascq);
		}

		INIT_WORK(&cmd->work, target_complete_ok_work);
		queue_work(target_completion_wq, &cmd->work);
		return 0;
	}

	atomic_inc(&cmd->t_fe_count);

	/*
	 * If this command is not a write we can execute it right here,
	 * for write buffers we need to notify the fabric driver first
	 * and let it call back once the write buffers are ready.
	 */
	target_add_to_state_list(cmd);
	if (cmd->data_direction != DMA_TO_DEVICE) {
		target_execute_cmd(cmd);
		return 0;
	}

	spin_lock_irq(&cmd->t_state_lock);
	cmd->t_state = TRANSPORT_WRITE_PENDING;
	spin_unlock_irq(&cmd->t_state_lock);

	transport_cmd_check_stop(cmd, false);

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;

	if (ret < 0)
		return ret;
	return 1;

out_fail:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	return -EINVAL;
queue_full:
	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
	transport_handle_queue_full(cmd, cmd->se_dev);
	return 0;
}
EXPORT_SYMBOL(transport_generic_new_cmd);
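
/*
 * Editor's note: a sketch of how a caller is expected to interpret the
 * return value of transport_generic_new_cmd(), based on the code above;
 * the caller shown is hypothetical:
 *
 *	ret = transport_generic_new_cmd(se_cmd);
 *	// 0:  command completed or queued for execution (READ / no payload)
 *	// 1:  WRITE accepted; ->write_pending() has been called and the
 *	//     fabric must later resume with target_execute_cmd()
 *	// <0: setup failed; generate a CHECK_CONDITION response
 */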

static void transport_write_pending_qf(struct se_cmd *cmd)
{
	int ret;

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM) {
		pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
			 cmd);
		transport_handle_queue_full(cmd, cmd->se_dev);
	}
}


void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
			transport_wait_for_tasks(cmd);

		transport_release_cmd(cmd);
	} else {
		if (wait_for_tasks)
			transport_wait_for_tasks(cmd);

		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);

		if (cmd->se_lun)
			transport_lun_remove_cmd(cmd);

		transport_put_cmd(cmd);
	}
}
EXPORT_SYMBOL(transport_generic_free_cmd);
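
/*
 * Editor's note: a minimal sketch of a fabric response-completion path,
 * assuming the usual pairing with the kref helpers below; the function
 * name is hypothetical:
 *
 *	static void my_fabric_cmd_done(struct se_cmd *se_cmd)
 *	{
 *		// Drops LUN/device references; with check_release set this
 *		// ends in target_put_sess_cmd() via transport_release_cmd().
 *		transport_generic_free_cmd(se_cmd, 0);
 *	}
 */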

/* target_get_sess_cmd - Add command to active ->sess_cmd_list
 * @se_sess:	session to reference
 * @se_cmd:	command descriptor to add
 * @ack_kref:	Signal that fabric will perform an ack target_put_sess_cmd()
 */
static int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
			       bool ack_kref)
{
	unsigned long flags;
	int ret = 0;

	kref_init(&se_cmd->cmd_kref);
	/*
	 * Add a second kref if the fabric caller is expecting to handle
	 * fabric acknowledgement that requires two target_put_sess_cmd()
	 * invocations before se_cmd descriptor release.
	 */
	if (ack_kref) {
		kref_get(&se_cmd->cmd_kref);
		se_cmd->se_cmd_flags |= SCF_ACK_KREF;
	}

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	if (se_sess->sess_tearing_down) {
		ret = -ESHUTDOWN;
		goto out;
	}
	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
	se_cmd->check_release = 1;

out:
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
	return ret;
}


static void target_release_cmd_kref(struct kref *kref)
{
	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
	struct se_session *se_sess = se_cmd->se_sess;
	unsigned long flags;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	if (list_empty(&se_cmd->se_cmd_list)) {
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		se_cmd->se_tfo->release_cmd(se_cmd);
		return;
	}
	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		complete(&se_cmd->cmd_wait_comp);
		return;
	}
	list_del(&se_cmd->se_cmd_list);
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

	se_cmd->se_tfo->release_cmd(se_cmd);
}

/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
 * @se_sess:	session to reference
 * @se_cmd:	command descriptor to drop
 */
int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
{
	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
}
EXPORT_SYMBOL(target_put_sess_cmd);
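
/*
 * Editor's note: the kref protocol implied above, sketched for a fabric
 * that sets ack_kref (normally reached via target_submit_cmd()); the
 * sequence is an illustration, not code from this file:
 *
 *	target_get_sess_cmd(se_sess, se_cmd, true);	// kref = 2
 *	...
 *	target_put_sess_cmd(se_sess, se_cmd);	// core completion, kref = 1
 *	target_put_sess_cmd(se_sess, se_cmd);	// initiator ack, kref = 0
 *						// -> ->release_cmd()
 */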


/* target_sess_cmd_list_set_waiting - Flag all commands in
 *         sess_cmd_list to complete cmd_wait_comp.  Set
 *         sess_tearing_down so no more commands are queued.
 * @se_sess:	session to flag
 */
void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
{
	struct se_cmd *se_cmd;
	unsigned long flags;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);

	WARN_ON(se_sess->sess_tearing_down);
	se_sess->sess_tearing_down = 1;

	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list)
		se_cmd->cmd_wait_set = 1;

	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);

/* target_wait_for_sess_cmds - Wait for outstanding descriptors
 * @se_sess:    session to wait for active I/O
 * @wait_for_tasks:	Make extra transport_wait_for_tasks call
 */
void target_wait_for_sess_cmds(
	struct se_session *se_sess,
	int wait_for_tasks)
{
	struct se_cmd *se_cmd, *tmp_cmd;
	bool rc = false;

	list_for_each_entry_safe(se_cmd, tmp_cmd,
				&se_sess->sess_cmd_list, se_cmd_list) {
		list_del(&se_cmd->se_cmd_list);

		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
			" %d\n", se_cmd, se_cmd->t_state,
			se_cmd->se_tfo->get_cmd_state(se_cmd));

		if (wait_for_tasks) {
			pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));

			rc = transport_wait_for_tasks(se_cmd);

			pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));
		}

		if (!rc) {
			wait_for_completion(&se_cmd->cmd_wait_comp);
			pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));
		}

		se_cmd->se_tfo->release_cmd(se_cmd);
	}
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);
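
/*
 * Editor's note: a minimal sketch of the session shutdown sequence these
 * two helpers are designed for, as a hypothetical fabric would drive it:
 *
 *	target_sess_cmd_list_set_waiting(se_sess);	// stop new additions
 *	target_wait_for_sess_cmds(se_sess, 0);		// drain outstanding I/O
 *	transport_deregister_session(se_sess);		// then tear down
 */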


/*	transport_lun_wait_for_tasks():
 *
 *	Called from ConfigFS context to stop the passed struct se_cmd to allow
 *	a struct se_lun to be successfully shut down.
 */
static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * If the frontend has already requested this struct se_cmd to
	 * be stopped, we can safely ignore this struct se_cmd.
	 */
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_STOP) {
		cmd->transport_state &= ~CMD_T_LUN_STOP;

		pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
			 cmd->se_tfo->get_task_tag(cmd));
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		transport_cmd_check_stop(cmd, false);
		return -EPERM;
	}
	cmd->transport_state |= CMD_T_LUN_FE_STOP;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	// XXX: audit task_flags checks.
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if ((cmd->transport_state & CMD_T_BUSY) &&
	    (cmd->transport_state & CMD_T_SENT)) {
		if (!target_stop_cmd(cmd, &flags))
			ret++;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	pr_debug("ConfigFS: cmd: %p stop tasks ret:"
			" %d\n", cmd, ret);
	if (!ret) {
		pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
				cmd->se_tfo->get_task_tag(cmd));
		wait_for_completion(&cmd->transport_lun_stop_comp);
		pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
				cmd->se_tfo->get_task_tag(cmd));
	}

	return 0;
}

static void __transport_clear_lun_from_sessions(struct se_lun *lun)
{
	struct se_cmd *cmd = NULL;
	unsigned long lun_flags, cmd_flags;
	/*
	 * Do exception processing and return CHECK_CONDITION status to the
	 * Initiator Port.
	 */
	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	while (!list_empty(&lun->lun_cmd_list)) {
		cmd = list_first_entry(&lun->lun_cmd_list,
		       struct se_cmd, se_lun_node);
		list_del_init(&cmd->se_lun_node);

		spin_lock(&cmd->t_state_lock);
		pr_debug("SE_LUN[%d] - Setting cmd->transport"
			"_lun_stop for  ITT: 0x%08x\n",
			cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));
		cmd->transport_state |= CMD_T_LUN_STOP;
		spin_unlock(&cmd->t_state_lock);

		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);

		if (!cmd->se_lun) {
			pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
				cmd->se_tfo->get_task_tag(cmd),
				cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
			BUG();
		}
		/*
		 * If the Storage engine still owns the iscsi_cmd_t, determine
		 * and/or stop its context.
		 */
		pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
			"_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));

		if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}

		pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
			"_wait_for_tasks(): SUCCESS\n",
			cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));

		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
		if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
			spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
			goto check_cond;
		}
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		target_remove_from_state_list(cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);

		/*
		 * The Storage engine stopped this struct se_cmd before it was
		 * sent to the fabric frontend for delivery back to the
		 * Initiator Node.  Return this SCSI CDB back with a
		 * CHECK_CONDITION status.
		 */
check_cond:
		transport_send_check_condition_and_sense(cmd,
				TCM_NON_EXISTENT_LUN, 0);
		/*
		 * If the fabric frontend is waiting for this iscsi_cmd_t to
		 * be released, notify the waiting thread now that LU has
		 * finished accessing it.
		 */
		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
		if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
			pr_debug("SE_LUN[%d] - Detected FE stop for"
				" struct se_cmd: %p ITT: 0x%08x\n",
				lun->unpacked_lun,
				cmd, cmd->se_tfo->get_task_tag(cmd));

			spin_unlock_irqrestore(&cmd->t_state_lock,
					cmd_flags);
			transport_cmd_check_stop(cmd, false);
			complete(&cmd->transport_lun_fe_stop_comp);
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}
		pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
			lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));

		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	}
	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
}

static int transport_clear_lun_thread(void *p)
{
	struct se_lun *lun = p;

	__transport_clear_lun_from_sessions(lun);
	complete(&lun->lun_shutdown_comp);

	return 0;
}

int transport_clear_lun_from_sessions(struct se_lun *lun)
{
	struct task_struct *kt;

	kt = kthread_run(transport_clear_lun_thread, lun,
			"tcm_cl_%u", lun->unpacked_lun);
	if (IS_ERR(kt)) {
		pr_err("Unable to start clear_lun thread\n");
		return PTR_ERR(kt);
	}
	wait_for_completion(&lun->lun_shutdown_comp);

	return 0;
}


/**
 * transport_wait_for_tasks - wait for completion to occur
 * @cmd:	command to wait
 *
 * Called from frontend fabric context to wait for storage engine
 * to pause and/or release frontend generated struct se_cmd.
 */
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}
	/*
	 * If we are already stopped due to an external event (i.e. LUN shutdown)
	 * sleep until the connection can have the passed struct se_cmd back.
	 * The cmd->transport_lun_stopped_sem will be upped by
	 * transport_clear_lun_from_sessions() once the ConfigFS context caller
	 * has completed its operation on the struct se_cmd.
	 */
	if (cmd->transport_state & CMD_T_LUN_STOP) {
		pr_debug("wait_for_tasks: Stopping"
			" wait_for_completion(&cmd->t_tasktransport_lun_fe"
			"_stop_comp); for ITT: 0x%08x\n",
			cmd->se_tfo->get_task_tag(cmd));
		/*
		 * There is a special case for WRITES where a FE exception +
		 * LUN shutdown means ConfigFS context is still sleeping on
		 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
		 * We go ahead and up transport_lun_stop_comp just to be sure
		 * here.
		 */
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->transport_lun_stop_comp);
		wait_for_completion(&cmd->transport_lun_fe_stop_comp);
		spin_lock_irqsave(&cmd->t_state_lock, flags);

		target_remove_from_state_list(cmd);
		/*
		 * At this point, the frontend that originated this
		 * struct se_cmd now owns the structure, and it can be
		 * released through normal means below.
		 */
		pr_debug("wait_for_tasks: Stopped"
			" wait_for_completion(&cmd->t_tasktransport_lun_fe_"
			"stop_comp); for ITT: 0x%08x\n",
			cmd->se_tfo->get_task_tag(cmd));

		cmd->transport_state &= ~CMD_T_LUN_STOP;
	}

	if (!(cmd->transport_state & CMD_T_ACTIVE)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	cmd->transport_state |= CMD_T_STOP;

	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
		" i_state: %d, t_state: %d, CMD_T_STOP\n",
		cmd, cmd->se_tfo->get_task_tag(cmd),
		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	wait_for_completion(&cmd->t_transport_stop_comp);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);

	pr_debug("wait_for_tasks: Stopped wait_for_completion("
		"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
		cmd->se_tfo->get_task_tag(cmd));

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return true;
}
EXPORT_SYMBOL(transport_wait_for_tasks);

static int transport_get_sense_codes(
	struct se_cmd *cmd,
	u8 *asc,
	u8 *ascq)
{
	*asc = cmd->scsi_asc;
	*ascq = cmd->scsi_ascq;

	return 0;
}

static int transport_set_sense_codes(
	struct se_cmd *cmd,
	u8 asc,
	u8 ascq)
{
	cmd->scsi_asc = asc;
	cmd->scsi_ascq = ascq;

	return 0;
}

int transport_send_check_condition_and_sense(
	struct se_cmd *cmd,
	u8 reason,
	int from_transport)
{
	unsigned char *buffer = cmd->sense_buffer;
	unsigned long flags;
	int offset;
	u8 asc = 0, ascq = 0;


	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}
	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (!reason && from_transport)
		goto after_reason;

	if (!from_transport)
		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
	/*
	 * Data Segment and SenseLength of the fabric response PDU.
	 *
	 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
	 * from include/scsi/scsi_cmnd.h
	 */
	offset = cmd->se_tfo->set_fabric_sense_len(cmd,
				TRANSPORT_SENSE_BUFFER);
	/*
	 * Actual SENSE DATA, see SPC-3 7.23.2  SPC_SENSE_KEY_OFFSET uses
	 * SENSE KEY values from include/scsi/scsi.h
	 */
	switch (reason) {
	case TCM_NON_EXISTENT_LUN:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT NOT SUPPORTED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
		break;
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_SECTOR_COUNT_TOO_MANY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID COMMAND OPERATION CODE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
		break;
	case TCM_UNKNOWN_MODE_PAGE:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_CHECK_CONDITION_ABORT_CMD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* BUS DEVICE RESET FUNCTION OCCURRED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
		break;
	case TCM_INCORRECT_AMOUNT_OF_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* NOT ENOUGH UNSOLICITED DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
		break;
	case TCM_INVALID_CDB_FIELD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_INVALID_PARAMETER_LIST:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN PARAMETER LIST */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
		break;
	case TCM_UNEXPECTED_UNSOLICITED_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* UNEXPECTED_UNSOLICITED_DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
		break;
	case TCM_SERVICE_CRC_ERROR:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* PROTOCOL SERVICE CRC ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
		/* N/A */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
		break;
	case TCM_SNACK_REJECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* READ ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
		/* FAILED RETRANSMISSION REQUEST */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
		break;
	case TCM_WRITE_PROTECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* DATA PROTECT */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
		/* WRITE PROTECTED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
		break;
	case TCM_ADDRESS_OUT_OF_RANGE:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL BLOCK ADDRESS OUT OF RANGE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x21;
		break;
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* UNIT ATTENTION */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_CHECK_CONDITION_NOT_READY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* Not Ready */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
		transport_get_sense_codes(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	default:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT COMMUNICATION FAILURE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
		break;
	}
	/*
	 * This code uses linux/include/scsi/scsi.h SAM status codes!
	 */
	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	/*
	 * Automatically padded, this value is encoded in the fabric's
	 * data_length response PDU containing the SCSI defined sense data.
	 */
	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;

after_reason:
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
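
/*
 * Editor's note: a worked layout of the fixed-format sense data built above,
 * using the TCM_WRITE_PROTECTED case as the example.  Offsets follow the
 * SPC_*_OFFSET constants used above (sense key at byte 2, additional length
 * at 7, ASC/ASCQ at 12/13), relative to the fabric header length in 'offset':
 *
 *	buffer[offset + 0]  = 0x70;		// current error, fixed format
 *	buffer[offset + 2]  = DATA_PROTECT;	// SENSE KEY
 *	buffer[offset + 7]  = 10;		// ADDITIONAL SENSE LENGTH
 *	buffer[offset + 12] = 0x27;		// ASC: WRITE PROTECTED
 *	buffer[offset + 13] = 0x00;		// ASCQ
 */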

int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
	int ret = 0;

	if (cmd->transport_state & CMD_T_ABORTED) {
		if (!send_status ||
		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
			return 1;

		pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
			" status for CDB: 0x%02x ITT: 0x%08x\n",
			cmd->t_task_cdb[0],
			cmd->se_tfo->get_task_tag(cmd));

		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
		cmd->se_tfo->queue_status(cmd);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL(transport_check_aborted_status);

void transport_send_task_abort(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * If there are still expected incoming fabric WRITEs, we wait
	 * until they have completed before sending a TASK_ABORTED
	 * response.  This response with TASK_ABORTED status will be
	 * queued back to fabric module by transport_check_aborted_status().
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
			cmd->transport_state |= CMD_T_ABORTED;
			smp_mb__after_atomic_inc();
		}
	}
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;

	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
		" ITT: 0x%08x\n", cmd->t_task_cdb[0],
		cmd->se_tfo->get_task_tag(cmd));

	cmd->se_tfo->queue_status(cmd);
}


static void target_tmr_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	struct se_device *dev = cmd->se_dev;
	struct se_tmr_req *tmr = cmd->se_tmr_req;
	int ret;

	switch (tmr->function) {
	case TMR_ABORT_TASK:
		core_tmr_abort_task(dev, tmr, cmd->se_sess);
		break;
	case TMR_ABORT_TASK_SET:
	case TMR_CLEAR_ACA:
	case TMR_CLEAR_TASK_SET:
		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		break;
	case TMR_LUN_RESET:
		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
					 TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_WARM_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_COLD_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	default:
		pr_err("Unknown TMR function: 0x%02x.\n",
				tmr->function);
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	}

	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
	cmd->se_tfo->queue_tm_rsp(cmd);

	transport_cmd_check_stop_to_fabric(cmd);
}

int transport_generic_handle_tmr(
	struct se_cmd *cmd)
{
	INIT_WORK(&cmd->work, target_tmr_work);
	queue_work(cmd->se_dev->tmr_wq, &cmd->work);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);
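
/*
 * Editor's note: a minimal sketch of a fabric submitting a LUN RESET through
 * the entry point above; allocation and error handling are elided and the
 * setup shown is an assumption (fabrics normally reach this via
 * target_submit_tmr()):
 *
 *	se_cmd->se_tmr_req->function = TMR_LUN_RESET;
 *	transport_generic_handle_tmr(se_cmd);
 *	// target_tmr_work() then runs from dev->tmr_wq and replies via
 *	// ->queue_tm_rsp() with tmr->response set.
 */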