/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tmr.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_configfs.h>

#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_scdb.h"
#include "target_core_ua.h"

static int sub_api_initialized;

static struct kmem_cache *se_cmd_cache;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_tmr_req_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;

/* Used for transport_dev_get_map_*() */
typedef int (*map_func_t)(struct se_task *, u32);

static int transport_generic_write_pending(struct se_cmd *);
static int transport_processing_thread(void *param);
static int __transport_execute_tasks(struct se_device *dev);
static void transport_complete_task_attr(struct se_cmd *cmd);
static int transport_complete_qf(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev, int (*qf_callback)(struct se_cmd *));
static void transport_direct_request_timeout(struct se_cmd *cmd);
static void transport_free_dev_tasks(struct se_cmd *cmd);
static u32 transport_allocate_tasks(struct se_cmd *cmd,
		unsigned long long starting_lba,
		enum dma_data_direction data_direction,
		struct scatterlist *sgl, unsigned int nents);
static int transport_generic_get_mem(struct se_cmd *cmd);
static int transport_generic_remove(struct se_cmd *cmd,
		int session_reinstatement);
static void transport_release_fe_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
		struct se_queue_obj *qobj);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void transport_stop_all_task_timers(struct se_cmd *cmd);

int init_se_kmem_caches(void)
{
	se_cmd_cache = kmem_cache_create("se_cmd_cache",
			sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
	if (!se_cmd_cache) {
		pr_err("kmem_cache_create for struct se_cmd failed\n");
		goto out;
	}
	se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
			sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
			0, NULL);
	if (!se_tmr_req_cache) {
		pr_err("kmem_cache_create() for struct se_tmr_req"
				" failed\n");
		goto out;
	}
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session"
				" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration"
				" failed\n");
		goto out;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
				" failed\n");
		goto out;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
				"cache failed\n");
		goto out;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"cache failed\n");
		goto out;
	}
	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
			"t10_alua_tg_pt_gp_mem_cache",
			sizeof(struct t10_alua_tg_pt_gp_member),
			__alignof__(struct t10_alua_tg_pt_gp_member),
			0, NULL);
	if (!t10_alua_tg_pt_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"mem_t failed\n");
		goto out;
	}

	return 0;
out:
	if (se_cmd_cache)
		kmem_cache_destroy(se_cmd_cache);
	if (se_tmr_req_cache)
		kmem_cache_destroy(se_tmr_req_cache);
	if (se_sess_cache)
		kmem_cache_destroy(se_sess_cache);
	if (se_ua_cache)
		kmem_cache_destroy(se_ua_cache);
	if (t10_pr_reg_cache)
		kmem_cache_destroy(t10_pr_reg_cache);
	if (t10_alua_lu_gp_cache)
		kmem_cache_destroy(t10_alua_lu_gp_cache);
	if (t10_alua_lu_gp_mem_cache)
		kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	if (t10_alua_tg_pt_gp_cache)
		kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	if (t10_alua_tg_pt_gp_mem_cache)
		kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	kmem_cache_destroy(se_cmd_cache);
	kmem_cache_destroy(se_tmr_req_cache);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}

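/*
 * Initialize a se_queue_obj used to pass struct se_cmd descriptors
 * to a device processing thread.
 */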
void transport_init_queue_obj(struct se_queue_obj *qobj)
{
	atomic_set(&qobj->queue_cnt, 0);
	INIT_LIST_HEAD(&qobj->qobj_list);
	init_waitqueue_head(&qobj->thread_wq);
	spin_lock_init(&qobj->cmd_queue_lock);
}
EXPORT_SYMBOL(transport_init_queue_obj);

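/*
 * Attempt to load the known TCM subsystem plugins.  Individual
 * request_module() failures are logged but are intentionally not
 * treated as fatal, as not every backend has to be present.
 */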
static int transport_subsystem_reqmods(void)
{
	int ret;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = request_module("target_core_stgt");
	if (ret != 0)
		pr_err("Unable to load target_core_stgt\n");

	return 0;
}

int transport_subsystem_check_init(void)
{
	int ret;

	if (sub_api_initialized)
		return 0;
	/*
	 * Request the loading of known TCM subsystem plugins..
	 */
	ret = transport_subsystem_reqmods();
	if (ret < 0)
		return ret;

	sub_api_initialized = 1;
	return 0;
}

struct se_session *transport_init_session(void)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from"
				" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);

/*
 * Called with spin_lock_bh(&struct se_portal_group->session_lock) held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}
		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	spin_lock_bh(&se_tpg->session_lock);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_bh(&se_tpg->session_lock);
}
EXPORT_SYMBOL(transport_register_session);

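/*
 * Drop a se_session from its se_node_acl's active session list, and
 * repoint nacl_sess at the most recently active remaining session.
 */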
void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		list_del(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	struct se_node_acl *se_nacl;
	unsigned long flags;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	/*
	 * Determine if we need to do extra work for this initiator node's
	 * struct se_node_acl if it had been previously dynamically generated.
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
		if (se_nacl->dynamic_node_acl) {
			if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
					se_tpg)) {
				list_del(&se_nacl->acl_list);
				se_tpg->num_node_acls--;
				spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);

				core_tpg_wait_for_nacl_pr_ref(se_nacl);
				core_free_device_list_for_node(se_nacl, se_tpg);
				se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
						se_nacl);
				spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
			}
		}
		spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
	}

	transport_free_session(se_sess);

	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		se_tpg->se_tpg_tfo->get_fabric_name());
}
EXPORT_SYMBOL(transport_deregister_session);

/*
 * Called with cmd->t_state_lock held.
 */
static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
{
	struct se_device *dev;
	struct se_task *task;
	unsigned long flags;

	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		dev = task->se_dev;
		if (!dev)
			continue;

		if (atomic_read(&task->task_active))
			continue;

		if (!atomic_read(&task->task_state_active))
			continue;

		spin_lock_irqsave(&dev->execute_task_lock, flags);
		list_del(&task->t_state_list);
		pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
			cmd->se_tfo->get_task_tag(cmd), dev, task);
		spin_unlock_irqrestore(&dev->execute_task_lock, flags);

		atomic_set(&task->task_state_active, 0);
		atomic_dec(&cmd->t_task_cdbs_ex_left);
	}
}

/*	transport_cmd_check_stop():
 *
 *	'transport_off = 1' determines if t_transport_active should be cleared.
 *	'transport_off = 2' determines if task_dev_state should be removed.
 *
 *	A non-zero u8 t_state sets cmd->t_state.
 *	Returns 1 when command is stopped, else 0.
 */
static int transport_cmd_check_stop(
	struct se_cmd *cmd,
	int transport_off,
	u8 t_state)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	/*
	 * Determine if IOCTL context caller is requesting the stopping of this
	 * command for LUN shutdown purposes.
	 */
	if (atomic_read(&cmd->transport_lun_stop)) {
		pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)"
			" == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
			cmd->se_tfo->get_task_tag(cmd));

		cmd->deferred_t_state = cmd->t_state;
		cmd->t_state = TRANSPORT_DEFERRED_CMD;
		atomic_set(&cmd->t_transport_active, 0);
		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->transport_lun_stop_comp);
		return 1;
	}
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (atomic_read(&cmd->t_transport_stop)) {
		pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) =="
			" TRUE for ITT: 0x%08x\n", __func__, __LINE__,
			cmd->se_tfo->get_task_tag(cmd));

		cmd->deferred_t_state = cmd->t_state;
		cmd->t_state = TRANSPORT_DEFERRED_CMD;
		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);

		/*
		 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
		 * to FE.
		 */
		if (transport_off == 2)
			cmd->se_lun = NULL;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->t_transport_stop_comp);
		return 1;
	}
	if (transport_off) {
		atomic_set(&cmd->t_transport_active, 0);
		if (transport_off == 2) {
			transport_all_task_dev_remove_state(cmd);
			/*
			 * Clear struct se_cmd->se_lun before the transport_off == 2
			 * handoff to fabric module.
			 */
			cmd->se_lun = NULL;
			/*
			 * Some fabric modules like tcm_loop can release their
			 * internally allocated I/O reference and struct se_cmd now.
			 */
			if (cmd->se_tfo->check_stop_free != NULL) {
				spin_unlock_irqrestore(
					&cmd->t_state_lock, flags);

				cmd->se_tfo->check_stop_free(cmd);
				return 1;
			}
		}
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		return 0;
	} else if (t_state)
		cmd->t_state = t_state;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return 0;
}

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	return transport_cmd_check_stop(cmd, 2, 0);
}

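/*
 * Unbind a se_cmd from its se_lun: clear any remaining device task
 * state and drop the command from the LUN's pending command list.
 */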
static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;
	unsigned long flags;

	if (!lun)
		return;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (!atomic_read(&cmd->transport_dev_active)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		goto check_lun;
	}
	atomic_set(&cmd->transport_dev_active, 0);
	transport_all_task_dev_remove_state(cmd);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

check_lun:
	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
	if (atomic_read(&cmd->transport_lun_active)) {
		list_del(&cmd->se_lun_node);
		atomic_set(&cmd->transport_lun_active, 0);
#if 0
		pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n",
			cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun);
#endif
	}
	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
}

void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	transport_lun_remove_cmd(cmd);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
	if (remove) {
		transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
		transport_generic_remove(cmd, 0);
	}
}

void transport_cmd_finish_abort_tmr(struct se_cmd *cmd)
{
	transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;

	transport_generic_remove(cmd, 0);
}

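/*
 * Queue a se_cmd to the device processing thread, optionally setting a
 * new t_state first.  QUEUE_FULL retries are re-queued at the head of
 * the list; everything else goes to the tail.
 */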
static void transport_add_cmd_to_queue(
	struct se_cmd *cmd,
	int t_state)
{
	struct se_device *dev = cmd->se_dev;
	struct se_queue_obj *qobj = &dev->dev_queue_obj;
	unsigned long flags;

	if (t_state) {
		spin_lock_irqsave(&cmd->t_state_lock, flags);
		cmd->t_state = t_state;
		atomic_set(&cmd->t_transport_active, 1);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	}

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);

	/* If the cmd is already on the list, remove it before we add it */
	if (!list_empty(&cmd->se_queue_node))
		list_del(&cmd->se_queue_node);
	else
		atomic_inc(&qobj->queue_cnt);

	if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) {
		cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL;
		list_add(&cmd->se_queue_node, &qobj->qobj_list);
	} else
		list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
	atomic_set(&cmd->t_transport_queue_active, 1);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	wake_up_interruptible(&qobj->thread_wq);
}

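/*
 * Dequeue the next se_cmd from the queue object, or return NULL if
 * the queue is empty.  Runs in device processing thread context.
 */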
static struct se_cmd *
transport_get_cmd_from_queue(struct se_queue_obj *qobj)
{
	struct se_cmd *cmd;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (list_empty(&qobj->qobj_list)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return NULL;
	}
	cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);

	atomic_set(&cmd->t_transport_queue_active, 0);

	list_del_init(&cmd->se_queue_node);
	atomic_dec(&qobj->queue_cnt);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	return cmd;
}

static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
		struct se_queue_obj *qobj)
{
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (!atomic_read(&cmd->t_transport_queue_active)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return;
	}
	atomic_set(&cmd->t_transport_queue_active, 0);
	atomic_dec(&qobj->queue_cnt);
	list_del_init(&cmd->se_queue_node);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	if (atomic_read(&cmd->t_transport_queue_active)) {
		pr_err("ITT: 0x%08x t_transport_queue_active: %d\n",
			cmd->se_tfo->get_task_tag(cmd),
			atomic_read(&cmd->t_transport_queue_active));
	}
}

/*
 * Completion function used by TCM subsystem plugins (such as FILEIO)
 * for queueing up response from struct se_subsystem_api->do_task()
 */
void transport_complete_sync_cache(struct se_cmd *cmd, int good)
{
	struct se_task *task = list_entry(cmd->t_task_list.next,
				struct se_task, t_list);

	if (good) {
		cmd->scsi_status = SAM_STAT_GOOD;
		task->task_scsi_status = GOOD;
	} else {
		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
		task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
		task->task_se_cmd->transport_error_status =
					PYX_TRANSPORT_ILLEGAL_REQUEST;
	}

	transport_complete_task(task, good);
}
EXPORT_SYMBOL(transport_complete_sync_cache);

/*	transport_complete_task():
 *
 *	Called from interrupt and non interrupt context depending
 *	on the transport plugin.
 */
void transport_complete_task(struct se_task *task, int success)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = task->se_dev;
	int t_state;
	unsigned long flags;
#if 0
	pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
			cmd->t_task_cdb[0], dev);
#endif
	if (dev)
		atomic_inc(&dev->depth_left);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	atomic_set(&task->task_active, 0);

	/*
	 * See if any sense data exists, if so set the TASK_SENSE flag.
	 * Also check for any other post completion work that needs to be
	 * done by the plugins.
	 */
	if (dev && dev->transport->transport_complete) {
		if (dev->transport->transport_complete(task) != 0) {
			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
			task->task_sense = 1;
			success = 1;
		}
	}

	/*
	 * See if we are waiting for outstanding struct se_task
	 * to complete for an exception condition
	 */
	if (atomic_read(&task->task_stop)) {
		/*
		 * Decrement cmd->t_se_count if this task had
		 * previously thrown its timeout exception handler.
		 */
		if (atomic_read(&task->task_timeout)) {
			atomic_dec(&cmd->t_se_count);
			atomic_set(&task->task_timeout, 0);
		}
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&task->task_stop_comp);
		return;
	}
	/*
	 * If the task's timeout handler has fired, use the t_task_cdbs_timeout
	 * left counter to determine when the struct se_cmd is ready to be queued to
	 * the processing thread.
	 */
	if (atomic_read(&task->task_timeout)) {
		if (!atomic_dec_and_test(
				&cmd->t_task_cdbs_timeout_left)) {
			spin_unlock_irqrestore(&cmd->t_state_lock,
				flags);
			return;
		}
		t_state = TRANSPORT_COMPLETE_TIMEOUT;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		transport_add_cmd_to_queue(cmd, t_state);
		return;
	}
	atomic_dec(&cmd->t_task_cdbs_timeout_left);

	/*
	 * Decrement the outstanding t_task_cdbs_left count.  The last
	 * struct se_task from struct se_cmd will complete itself into the
	 * device queue depending upon int success.
	 */
	if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
		if (!success)
			cmd->t_tasks_failed = 1;

		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}

	if (!success || cmd->t_tasks_failed) {
		t_state = TRANSPORT_COMPLETE_FAILURE;
		if (!task->task_error_status) {
			task->task_error_status =
				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
			cmd->transport_error_status =
				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		}
	} else {
		atomic_set(&cmd->t_transport_complete, 1);
		t_state = TRANSPORT_COMPLETE_OK;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	transport_add_cmd_to_queue(cmd, t_state);
}
EXPORT_SYMBOL(transport_complete_task);

/*
 * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
 * struct se_task list is ready to be added to the active execution list
 * of the struct se_device.
 *
 * Called with se_dev_t->execute_task_lock held.
 */
static inline int transport_add_task_check_sam_attr(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	/*
	 * No SAM Task attribute emulation enabled, add to tail of
	 * execution queue
	 */
	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
		list_add_tail(&task->t_execute_list, &dev->execute_task_list);
		return 0;
	}
	/*
	 * HEAD_OF_QUEUE attribute for received CDB, which means
	 * the first task that is associated with a struct se_cmd goes to
	 * head of the struct se_device->execute_task_list, and task_prev
	 * after that for each subsequent task
	 */
	if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
		list_add(&task->t_execute_list,
				(task_prev != NULL) ?
				&task_prev->t_execute_list :
				&dev->execute_task_list);

		pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
				" in execution queue\n",
				task->task_se_cmd->t_task_cdb[0]);
		return 1;
	}
	/*
	 * For ORDERED, SIMPLE or UNTAGGED attribute tasks once they have been
	 * transitioned from Dormant -> Active state, and are added to the end
	 * of the struct se_device->execute_task_list
	 */
	list_add_tail(&task->t_execute_list, &dev->execute_task_list);
	return 0;
}

/*	__transport_add_task_to_execute_queue():
 *
 *	Called with se_dev_t->execute_task_lock held.
 */
static void __transport_add_task_to_execute_queue(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	int head_of_queue;

	head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
	atomic_inc(&dev->execute_tasks);

	if (atomic_read(&task->task_state_active))
		return;
	/*
	 * Determine if this task needs to go to HEAD_OF_QUEUE for the
	 * state list as well.  Running with SAM Task Attribute emulation
	 * will always return head_of_queue == 0 here
	 */
	if (head_of_queue)
		list_add(&task->t_state_list, (task_prev) ?
				&task_prev->t_state_list :
				&dev->state_task_list);
	else
		list_add_tail(&task->t_state_list, &dev->state_task_list);

	atomic_set(&task->task_state_active, 1);

	pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
		task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
		task, dev);
}

static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
{
	struct se_device *dev;
	struct se_task *task;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		dev = task->se_dev;

		if (atomic_read(&task->task_state_active))
			continue;

		spin_lock(&dev->execute_task_lock);
		list_add_tail(&task->t_state_list, &dev->state_task_list);
		atomic_set(&task->task_state_active, 1);

		pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
			task->task_se_cmd->se_tfo->get_task_tag(
			task->task_se_cmd), task, dev);

		spin_unlock(&dev->execute_task_lock);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_task *task, *task_prev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		if (atomic_read(&task->task_execute_queue))
			continue;
		/*
		 * __transport_add_task_to_execute_queue() handles the
		 * SAM Task Attribute emulation if enabled
		 */
		__transport_add_task_to_execute_queue(task, task_prev, dev);
		atomic_set(&task->task_execute_queue, 1);
		task_prev = task;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*	transport_remove_task_from_execute_queue():
 *
 *
 */
void transport_remove_task_from_execute_queue(
	struct se_task *task,
	struct se_device *dev)
{
	unsigned long flags;

	if (atomic_read(&task->task_execute_queue) == 0) {
		dump_stack();
		return;
	}

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	list_del(&task->t_execute_list);
	atomic_set(&task->task_execute_queue, 0);
	atomic_dec(&dev->execute_tasks);
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN status
 */

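/*
 * Drain the device's queue-full list from workqueue context and hand
 * each se_cmd back to the processing thread for another attempt.
 */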
static void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
					qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec(&dev->dev_qf_count);
		smp_mb__after_atomic_dec();

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
			(cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" :
			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			: "UNKNOWN");
		/*
		 * The SCF_EMULATE_QUEUE_FULL flag will be cleared once se_cmd
		 * has been added to head of queue
		 */
		transport_add_cmd_to_queue(cmd, cmd->t_state);
	}
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	switch (dev->dev_status) {
	case TRANSPORT_DEVICE_ACTIVATED:
		*bl += sprintf(b + *bl, "ACTIVATED");
		break;
	case TRANSPORT_DEVICE_DEACTIVATED:
		*bl += sprintf(b + *bl, "DEACTIVATED");
		break;
	case TRANSPORT_DEVICE_SHUTDOWN:
		*bl += sprintf(b + *bl, "SHUTDOWN");
		break;
	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
		*bl += sprintf(b + *bl, "OFFLINE");
		break;
	default:
		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
		break;
	}

	*bl += sprintf(b + *bl, "  Execute/Left/Max Queue Depth: %d/%d/%d",
		atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
		dev->queue_depth);
	*bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
		dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
	*bl += sprintf(b + *bl, "        ");
}

/*	transport_release_all_cmds():
 *
 *
 */
static void transport_release_all_cmds(struct se_device *dev)
{
	struct se_cmd *cmd, *tcmd;
	int bug_out = 0, t_state;
	unsigned long flags;

	spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags);
	list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list,
				se_queue_node) {
		t_state = cmd->t_state;
		list_del_init(&cmd->se_queue_node);
		spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock,
				flags);

		pr_err("Releasing ITT: 0x%08x, i_state: %u,"
			" t_state: %u directly\n",
			cmd->se_tfo->get_task_tag(cmd),
			cmd->se_tfo->get_cmd_state(cmd), t_state);

		transport_release_fe_cmd(cmd);
		bug_out = 1;

		spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags);
	}
	spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags);
#if 0
	if (bug_out)
		BUG();
#endif
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access"
				" Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport"
				" Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set..
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	 if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
				hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
				hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);

static void core_setup_task_attr_emulation(struct se_device *dev)
{
	/*
	 * If this device is from Target_Core_Mod/pSCSI, disable the
	 * SAM Task Attribute emulation.
	 *
	 * This is currently not available in upstream Linux/SCSI Target
	 * mode code, and is assumed to be disabled while using TCM/pSCSI.
	 */
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
		return;
	}

	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
	pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
		" device\n", dev->transport->name,
		dev->transport->get_device_rev(dev));
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	pr_debug("  Vendor: ");
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			pr_debug("%c", wwn->vendor[i]);
		else
			pr_debug(" ");

	pr_debug("  Model: ");
	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			pr_debug("%c", wwn->model[i]);
		else
			pr_debug(" ");

	pr_debug("  Revision: ");
	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			pr_debug("%c", wwn->revision[i]);
		else
			pr_debug(" ");

	pr_debug("\n");

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
	pr_debug("                 ANSI SCSI revision: %02x\n",
				dev->transport->get_device_rev(dev));
}

struct se_device *transport_add_device_to_core_hba(
	struct se_hba *hba,
	struct se_subsystem_api *transport,
	struct se_subsystem_dev *se_dev,
	u32 device_flags,
	void *transport_dev,
	struct se_dev_limits *dev_limits,
	const char *inquiry_prod,
	const char *inquiry_rev)
{
	int force_pt;
	struct se_device  *dev;

	dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
	if (!dev) {
		pr_err("Unable to allocate memory for se_dev_t\n");
		return NULL;
	}

	transport_init_queue_obj(&dev->dev_queue_obj);
	dev->dev_flags		= device_flags;
	dev->dev_status		|= TRANSPORT_DEVICE_DEACTIVATED;
	dev->dev_ptr		= transport_dev;
	dev->se_hba		= hba;
	dev->se_sub_dev		= se_dev;
	dev->transport		= transport;
	atomic_set(&dev->active_cmds, 0);
	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->execute_task_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->ordered_cmd_list);
	INIT_LIST_HEAD(&dev->state_task_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->ordered_cmd_lock);
	spin_lock_init(&dev->state_task_lock);
	spin_lock_init(&dev->dev_alua_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->dev_status_lock);
	spin_lock_init(&dev->dev_status_thr_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);

	dev->queue_depth	= dev_limits->queue_depth;
	atomic_set(&dev->depth_left, dev->queue_depth);
	atomic_set(&dev->dev_ordered_id, 0);

	se_dev_set_default_attribs(dev, dev_limits);

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();
	spin_lock_init(&dev->stats_lock);

	spin_lock(&hba->device_lock);
	list_add_tail(&dev->dev_list, &hba->hba_dev_list);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);
	/*
	 * Setup the SAM Task Attribute emulation for struct se_device
	 */
	core_setup_task_attr_emulation(dev);
	/*
	 * Force PR and ALUA passthrough emulation with internal object use.
	 */
	force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
	/*
	 * Setup the Reservations infrastructure for struct se_device
	 */
	core_setup_reservations(dev, force_pt);
	/*
	 * Setup the Asymmetric Logical Unit Assignment for struct se_device
	 */
	if (core_setup_alua(dev, force_pt) < 0)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->process_thread = kthread_run(transport_processing_thread, dev,
					  "LIO_%s", dev->transport->name);
	if (IS_ERR(dev->process_thread)) {
		pr_err("Unable to create kthread: LIO_%s\n",
			dev->transport->name);
		goto out;
	}
	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 * This is required so that transport_get_inquiry() copies these
	 * originals once back into DEV_T10_WWN(dev) for the virtual device
	 * setup.
	 */
	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (!inquiry_prod || !inquiry_rev) {
			pr_err("All non TCM/pSCSI plugins require"
				" INQUIRY consts\n");
			goto out;
		}

		strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
		strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
	}
	scsi_dump_inquiry(dev);

	return dev;
out:
	kthread_stop(dev->process_thread);

	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	se_release_vpd_for_dev(dev);

	kfree(dev);

	return NULL;
}
EXPORT_SYMBOL(transport_add_device_to_core_hba);

/*	transport_generic_prepare_cdb():
 *
 *	Since the Initiator sees iSCSI devices as LUNs,  the SCSI CDB will
 *	contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
 *	The point of this is since we are mapping iSCSI LUNs to
 *	SCSI Target IDs having a non-zero LUN in the CDB will throw the
 *	devices and HBAs for a loop.
 */
static inline void transport_generic_prepare_cdb(
	unsigned char *cdb)
{
	switch (cdb[0]) {
	case READ_10: /* SBC - RDProtect */
	case READ_12: /* SBC - RDProtect */
	case READ_16: /* SBC - RDProtect */
	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
	case VERIFY: /* SBC - VRProtect */
	case VERIFY_16: /* SBC - VRProtect */
	case WRITE_VERIFY: /* SBC - VRProtect */
	case WRITE_VERIFY_12: /* SBC - VRProtect */
		break;
	default:
		cdb[1] &= 0x1f; /* clear logical unit number */
		break;
	}
}

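/*
 * Allocate a se_task from the subsystem plugin and initialize it for
 * this se_cmd and data direction.
 */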
static struct se_task *
transport_generic_get_task(struct se_cmd *cmd,
		enum dma_data_direction data_direction)
{
	struct se_task *task;
	struct se_device *dev = cmd->se_dev;

	task = dev->transport->alloc_task(cmd->t_task_cdb);
	if (!task) {
		pr_err("Unable to allocate struct se_task\n");
		return NULL;
	}

	INIT_LIST_HEAD(&task->t_list);
	INIT_LIST_HEAD(&task->t_execute_list);
	INIT_LIST_HEAD(&task->t_state_list);
	init_completion(&task->task_stop_comp);
	task->task_se_cmd = cmd;
	task->se_dev = dev;
	task->task_data_direction = data_direction;

	return task;
}

static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);

/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
	struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer)
{
	INIT_LIST_HEAD(&cmd->se_lun_node);
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_ordered_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_queue_node);

	INIT_LIST_HEAD(&cmd->t_task_list);
	init_completion(&cmd->transport_lun_fe_stop_comp);
	init_completion(&cmd->transport_lun_stop_comp);
	init_completion(&cmd->t_transport_stop_comp);
	spin_lock_init(&cmd->t_state_lock);
	atomic_set(&cmd->transport_dev_active, 1);

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;
}
EXPORT_SYMBOL(transport_init_se_cmd);

static int transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 0;

	if (cmd->sam_task_attr == MSG_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA"
			" emulation is not supported\n");
		return -EINVAL;
	}
	/*
	 * Used to determine when ORDERED commands should go from
	 * Dormant to Active status.
	 */
	cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
	smp_mb__after_atomic_inc();
	pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
			cmd->se_ordered_id, cmd->sam_task_attr,
			cmd->se_dev->transport->name);
	return 0;
}

void transport_free_se_cmd(
	struct se_cmd *se_cmd)
{
	if (se_cmd->se_tmr_req)
		core_tmr_release_req(se_cmd->se_tmr_req);
	/*
	 * Check and free any extended CDB buffer that was allocated
	 */
	if (se_cmd->t_task_cdb != se_cmd->__t_task_cdb)
		kfree(se_cmd->t_task_cdb);
}
EXPORT_SYMBOL(transport_free_se_cmd);

static void transport_generic_wait_for_tasks(struct se_cmd *, int, int);

/*	transport_generic_allocate_tasks():
 *
 *	Called from fabric RX Thread.
 */
int transport_generic_allocate_tasks(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	int ret;

	transport_generic_prepare_cdb(cdb);

	/*
	 * This is needed for early exceptions.
	 */
	cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;

	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		return -EINVAL;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now..  Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
						GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb"
				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(cmd->__t_task_cdb));
			return -ENOMEM;
		}
	} else
		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Copy the original CDB into cmd->t_task_cdb.
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
	/*
	 * Setup the received CDB based on SCSI defined opcodes and
	 * perform unit attention, persistent reservations and ALUA
	 * checks for virtual device backends.  The cmd->t_task_cdb
	 * pointer is expected to be setup before we reach this point.
	 */
	ret = transport_generic_cmd_sequencer(cmd, cdb);
	if (ret < 0)
		return ret;
	/*
	 * Check for SAM Task Attribute Emulation
	 */
	if (transport_check_alloc_task_attr(cmd) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}
	spin_lock(&cmd->se_lun->lun_sep_lock);
	if (cmd->se_lun->lun_sep)
		cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
	spin_unlock(&cmd->se_lun->lun_sep_lock);
	return 0;
}
EXPORT_SYMBOL(transport_generic_allocate_tasks);

static void transport_generic_request_failure(struct se_cmd *,
			struct se_device *, int, int);
/*
 * Used by fabric module frontends to queue tasks directly.
 * May only be used from process context.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	int ret;

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}
	if (in_interrupt()) {
		dump_stack();
		pr_err("transport_generic_handle_cdb cannot be called"
				" from interrupt context\n");
		return -EINVAL;
	}
	/*
	 * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following
	 * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
	 * in existing usage to ensure that outstanding descriptors are handled
	 * correctly during shutdown via transport_generic_wait_for_tasks()
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	atomic_set(&cmd->t_transport_active, 1);
	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary..
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret == -EAGAIN)
		return 0;
	else if (ret < 0) {
		cmd->transport_error_status = ret;
		transport_generic_request_failure(cmd, NULL, 0,
				(cmd->data_direction != DMA_TO_DEVICE));
	}
	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);

/*
 * Used by fabric module frontends defining a TFO->new_cmd_map() caller
 * to  queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
 * complete setup in TCM process context w/ TFO->new_cmd_map().
 */
int transport_generic_handle_cdb_map(
	struct se_cmd *cmd)
{
1789
	if (!cmd->se_lun) {
1790
		dump_stack();
1791
		pr_err("cmd->se_lun is NULL\n");
1792
		return -EINVAL;
1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813
	}

	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_cdb_map);

/*	transport_generic_handle_data():
 *
 *	Called by fabric module frontends once incoming DATA OUT has been
 *	received, to queue the se_cmd for WRITE processing.
 */
int transport_generic_handle_data(
	struct se_cmd *cmd)
{
	/*
	 * For the software fabric case, we assume the nexus is being
	 * failed/shutdown when signals are pending from the kthread context
	 * caller, so we return a failure.  For the HW target mode case running
	 * in interrupt code, the signal_pending() check is skipped.
	 */
	if (!in_interrupt() && signal_pending(current))
		return -EPERM;
	/*
	 * If the received CDB has already been ABORTED by the generic
	 * target engine, we now call transport_check_aborted_status()
	 * to queue any delayed TASK_ABORTED status for the received CDB to the
	 * fabric module as we are expecting no further incoming DATA OUT
	 * sequences at this point.
	 */
	if (transport_check_aborted_status(cmd, 1) != 0)
		return 0;

	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_data);

/*	transport_generic_handle_tmr():
 *
 *	Called by fabric module frontends to queue a TMR request se_cmd
 *	to the processing thread context.
 */
int transport_generic_handle_tmr(
	struct se_cmd *cmd)
{
	/*
	 * This is needed for early exceptions.
	 */
	cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;

	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);

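/*
 * May be invoked from interrupt context to have a se_cmd descriptor
 * released from the processing thread instead of the caller's context.
 */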
void transport_generic_free_cmd_intr(
	struct se_cmd *cmd)
{
	transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR);
}
EXPORT_SYMBOL(transport_generic_free_cmd_intr);

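/*
 * Walk the command's task list: tasks that have not yet been sent are
 * removed from the execute queue, and active tasks are flagged to stop
 * and waited on.  Returns the number of tasks that required no action.
 */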
static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
{
	struct se_task *task, *task_tmp;
	unsigned long flags;
	int ret = 0;

	pr_debug("ITT[0x%08x] - Stopping tasks\n",
		cmd->se_tfo->get_task_tag(cmd));

	/*
	 * No tasks remain in the execution queue
	 */
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_for_each_entry_safe(task, task_tmp,
				&cmd->t_task_list, t_list) {
		pr_debug("task_no[%d] - Processing task %p\n",
				task->task_no, task);
		/*
		 * If the struct se_task has not been sent and is not active,
		 * remove the struct se_task from the execution queue.
		 */
		if (!atomic_read(&task->task_sent) &&
		    !atomic_read(&task->task_active)) {
			spin_unlock_irqrestore(&cmd->t_state_lock,
					flags);
			transport_remove_task_from_execute_queue(task,
					task->se_dev);

			pr_debug("task_no[%d] - Removed from execute queue\n",
				task->task_no);
			spin_lock_irqsave(&cmd->t_state_lock, flags);
			continue;
		}

		/*
		 * If the struct se_task is active, sleep until it is returned
		 * from the plugin.
		 */
		if (atomic_read(&task->task_active)) {
			atomic_set(&task->task_stop, 1);
			spin_unlock_irqrestore(&cmd->t_state_lock,
					flags);

			pr_debug("task_no[%d] - Waiting to complete\n",
				task->task_no);
			wait_for_completion(&task->task_stop_comp);
			pr_debug("task_no[%d] - Stopped successfully\n",
				task->task_no);

			spin_lock_irqsave(&cmd->t_state_lock, flags);
			atomic_dec(&cmd->t_task_cdbs_left);

			atomic_set(&task->task_active, 0);
			atomic_set(&task->task_stop, 0);
		} else {
			pr_debug("task_no[%d] - Did nothing\n", task->task_no);
			ret++;
		}

		__transport_stop_task_timer(task, &flags);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return ret;
}

/*
 * Handle SAM-esque emulation for generic transport request failures.
 */
static void transport_generic_request_failure(
	struct se_cmd *cmd,
	struct se_device *dev,
	int complete,
	int sc)
{
	int ret = 0;

	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
		cmd->t_task_cdb[0]);
	pr_debug("-----[ i_state: %d t_state/def_t_state:"
		" %d/%d transport_error_status: %d\n",
		cmd->se_tfo->get_cmd_state(cmd),
		cmd->t_state, cmd->deferred_t_state,
		cmd->transport_error_status);
	pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
		" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
		" t_transport_active: %d t_transport_stop: %d"
		" t_transport_sent: %d\n", cmd->t_task_list_num,
		atomic_read(&cmd->t_task_cdbs_left),
		atomic_read(&cmd->t_task_cdbs_sent),
		atomic_read(&cmd->t_task_cdbs_ex_left),
		atomic_read(&cmd->t_transport_active),
		atomic_read(&cmd->t_transport_stop),
		atomic_read(&cmd->t_transport_sent));

	transport_stop_all_task_timers(cmd);

	if (dev)
		atomic_inc(&dev->depth_left);
	/*
	 * For SAM Task Attribute emulation for failed struct se_cmd
	 */
	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);

	if (complete) {
		transport_direct_request_timeout(cmd);
		cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
	}

	switch (cmd->transport_error_status) {
	case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
		cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
		break;
	case PYX_TRANSPORT_INVALID_CDB_FIELD:
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		break;
	case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
		break;
	case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
		if (!sc)
			transport_new_cmd_failure(cmd);
		/*
		 * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
		 * we force this session to fall back to session
		 * recovery.
		 */
		cmd->se_tfo->fall_back_to_erl0(cmd->se_sess);
		cmd->se_tfo->stop_session(cmd->se_sess, 0, 0);

		goto check_stop;
	case PYX_TRANSPORT_LU_COMM_FAILURE:
	case PYX_TRANSPORT_ILLEGAL_REQUEST:
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		break;
	case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
		cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
		break;
	case PYX_TRANSPORT_WRITE_PROTECTED:
		cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
		break;
	case PYX_TRANSPORT_RESERVATION_CONFLICT:
		/*
		 * No SENSE Data payload for this case, set SCSI Status
		 * and queue the response to $FABRIC_MOD.
		 *
		 * Uses linux/include/scsi/scsi.h SAM status codes defs
		 */
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
		/*
		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
		 * CONFLICT STATUS.
		 *
		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
		 */
		if (cmd->se_sess &&
		    cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
			core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
				cmd->orig_fe_lun, 0x2C,
				ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);

		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN)
			goto queue_full;
		goto check_stop;
	case PYX_TRANSPORT_USE_SENSE_REASON:
		/*
		 * struct se_cmd->scsi_sense_reason already set
		 */
		break;
	default:
		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
			cmd->t_task_cdb[0],
			cmd->transport_error_status);
		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	}
	/*
	 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
	 * make the call to transport_send_check_condition_and_sense()
	 * directly.  Otherwise expect the fabric to make the call to
	 * transport_send_check_condition_and_sense() after handling
	 * possible unsolicited write data payloads.
	 */
	if (!sc && !cmd->se_tfo->new_cmd_map)
		transport_new_cmd_failure(cmd);
	else {
		ret = transport_send_check_condition_and_sense(cmd,
				cmd->scsi_sense_reason, 0);
		if (ret == -EAGAIN)
			goto queue_full;
	}

check_stop:
	transport_lun_remove_cmd(cmd);
	if (!transport_cmd_check_stop_to_fabric(cmd))
		;
	return;

queue_full:
	cmd->t_state = TRANSPORT_COMPLETE_OK;
	transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf);
}

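/*
 * Release the extra cmd->t_se_count references taken by timed-out tasks
 * once no per-task timeouts remain outstanding for this command.
 */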
static void transport_direct_request_timeout(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (!atomic_read(&cmd->t_transport_timeout)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	if (atomic_read(&cmd->t_task_cdbs_timeout_left)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}

	atomic_sub(atomic_read(&cmd->t_transport_timeout),
		   &cmd->t_se_count);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

static void transport_generic_request_timeout(struct se_cmd *cmd)
{
	unsigned long flags;

	/*
	 * Reset cmd->t_se_count so that the final transport_generic_remove()
	 * call can free memory resources.
	 */
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (atomic_read(&cmd->t_transport_timeout) > 1) {
		int tmp = (atomic_read(&cmd->t_transport_timeout) - 1);

		atomic_sub(tmp, &cmd->t_se_count);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	transport_generic_remove(cmd, 0);
}

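/*
 * Helpers to extract the big-endian logical block address from the
 * fixed CDB formats: 21-bit for 6-byte CDBs, 32-bit for 10/12-byte
 * CDBs and 64-bit for 16-byte CDBs.
 */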
static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
	se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
}

/*
 * Called from interrupt context.
 */
static void transport_task_timeout_handler(unsigned long data)
{
	struct se_task *task = (struct se_task *)data;
	struct se_cmd *cmd = task->task_se_cmd;
	unsigned long flags;

	pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (task->task_flags & TF_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	task->task_flags &= ~TF_RUNNING;

	/*
	 * Determine if transport_complete_task() has already been called.
	 */
	if (!atomic_read(&task->task_active)) {
		pr_debug("transport task: %p cmd: %p timeout task_active"
				" == 0\n", task, cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}

	atomic_inc(&cmd->t_se_count);
	atomic_inc(&cmd->t_transport_timeout);
	cmd->t_tasks_failed = 1;

	atomic_set(&task->task_timeout, 1);
	task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
	task->task_scsi_status = 1;

	if (atomic_read(&task->task_stop)) {
		pr_debug("transport task: %p cmd: %p timeout task_stop"
				" == 1\n", task, cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&task->task_stop_comp);
		return;
	}

	if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
		pr_debug("transport task: %p cmd: %p timeout non zero"
				" t_task_cdbs_left\n", task, cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
			task, cmd);

	cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE);
}

/*
 * Called with cmd->t_state_lock held.
 */
static void transport_start_task_timer(struct se_task *task)
{
	struct se_device *dev = task->se_dev;
	int timeout;

	if (task->task_flags & TF_RUNNING)
		return;
	/*
	 * If the task_timeout is disabled, exit now.
	 */
	timeout = dev->se_sub_dev->se_dev_attrib.task_timeout;
	if (!timeout)
		return;

	init_timer(&task->task_timer);
	task->task_timer.expires = (get_jiffies_64() + timeout * HZ);
	task->task_timer.data = (unsigned long) task;
	task->task_timer.function = transport_task_timeout_handler;

	task->task_flags |= TF_RUNNING;
	add_timer(&task->task_timer);
#if 0
	pr_debug("Starting task timer for cmd: %p task: %p seconds:"
		" %d\n", task->task_se_cmd, task, timeout);
#endif
}

/*
 * Called with spin_lock_irq(&cmd->t_state_lock) held.
 */
void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
{
	struct se_cmd *cmd = task->task_se_cmd;

	if (!(task->task_flags & TF_RUNNING))
		return;

	task->task_flags |= TF_STOP;
	spin_unlock_irqrestore(&cmd->t_state_lock, *flags);

	del_timer_sync(&task->task_timer);

	spin_lock_irqsave(&cmd->t_state_lock, *flags);
	task->task_flags &= ~TF_RUNNING;
	task->task_flags &= ~TF_STOP;
}

static void transport_stop_all_task_timers(struct se_cmd *cmd)
{
	struct se_task *task = NULL, *task_tmp;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_for_each_entry_safe(task, task_tmp,
				&cmd->t_task_list, t_list)
		__transport_stop_task_timer(task, &flags);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

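/*
 * Back off while the device TCQ window is closed: sleep briefly (or
 * longer once the closed-window threshold is exceeded), then kick the
 * device processing thread to retry.
 */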
static inline int transport_tcq_window_closed(struct se_device *dev)
{
	if (dev->dev_tcq_window_closed++ <
			PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
		msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
	} else
		msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);

	wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
	return 0;
}

/*
 * Called from Fabric Module context from transport_execute_tasks()
 *
 * The return of this function determines if the tasks from struct se_cmd
 * get added to the execution queue in transport_execute_tasks(),
 * or are added to the delayed or ordered lists here.
 */
static inline int transport_execute_task_attr(struct se_cmd *cmd)
{
	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 1;
	/*
	 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
	 * to move the passed struct se_cmd's tasks to the front of the list.
	 */
	if (cmd->sam_task_attr == MSG_HEAD_TAG) {
		atomic_inc(&cmd->se_dev->dev_hoq_count);
		smp_mb__after_atomic_inc();
		pr_debug("Added HEAD_OF_QUEUE for CDB:"
			" 0x%02x, se_ordered_id: %u\n",
			cmd->t_task_cdb[0],
			cmd->se_ordered_id);
		return 1;
	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
		spin_lock(&cmd->se_dev->ordered_cmd_lock);
		list_add_tail(&cmd->se_ordered_node,
				&cmd->se_dev->ordered_cmd_list);
		spin_unlock(&cmd->se_dev->ordered_cmd_lock);

		atomic_inc(&cmd->se_dev->dev_ordered_sync);
		smp_mb__after_atomic_inc();

		pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
				" list, se_ordered_id: %u\n",
				cmd->t_task_cdb[0],
				cmd->se_ordered_id);
		/*
		 * Add ORDERED command to tail of execution queue if
		 * no other older commands exist that need to be
		 * completed first.
		 */
		if (!atomic_read(&cmd->se_dev->simple_cmds))
			return 1;
	} else {
		/*
		 * For SIMPLE and UNTAGGED Task Attribute commands
		 */
		atomic_inc(&cmd->se_dev->simple_cmds);
		smp_mb__after_atomic_inc();
	}
	/*
	 * If one or more outstanding ORDERED task attributes exist,
	 * keep the dormant task(s) built for the passed struct se_cmd
	 * on the delayed queue until the ORDERED commands complete.
	 */
	if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
		/*
		 * Add cmd w/ tasks to the delayed cmd queue that
		 * will be drained upon completion of HEAD_OF_QUEUE task.
		 */
		spin_lock(&cmd->se_dev->delayed_cmd_lock);
		cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
		list_add_tail(&cmd->se_delayed_node,
				&cmd->se_dev->delayed_cmd_list);
		spin_unlock(&cmd->se_dev->delayed_cmd_lock);

		pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
			" delayed CMD list, se_ordered_id: %u\n",
			cmd->t_task_cdb[0], cmd->sam_task_attr,
			cmd->se_ordered_id);
		/*
		 * Return zero to let transport_execute_tasks() know
		 * not to add the delayed tasks to the execution list.
		 */
		return 0;
	}
	/*
	 * Otherwise, no ORDERED task attributes exist.
	 */
	return 1;
}

/*
 * Called from fabric module context in transport_generic_new_cmd() and
 * transport_generic_process_write()
 */
static int transport_execute_tasks(struct se_cmd *cmd)
{
	int add_tasks;

	if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
		cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
		transport_generic_request_failure(cmd, NULL, 0, 1);
		return 0;
	}

	/*
	 * Call transport_cmd_check_stop() to see if a fabric exception
	 * has occurred that prevents execution.
	 */
	if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
		/*
		 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
		 * attribute for the tasks of the received struct se_cmd CDB
		 */
		add_tasks = transport_execute_task_attr(cmd);
		if (!add_tasks)
			goto execute_tasks;
		/*
		 * This calls transport_add_tasks_from_cmd() to handle
		 * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
		 * (if enabled) in __transport_add_task_to_execute_queue() and
		 * transport_add_task_check_sam_attr().
		 */
		transport_add_tasks_from_cmd(cmd);
	}
	/*
	 * Kick the execution queue for the cmd associated struct se_device
	 * storage object.
	 */
execute_tasks:
	__transport_execute_tasks(cmd->se_dev);
	return 0;
}

/*
 * Called to check the struct se_device tcq depth window, and once open
 * pull struct se_task from struct se_device->execute_task_list and send
 * them to the underlying subsystem plugin.
 *
 * Called from transport_processing_thread()
 */
static int __transport_execute_tasks(struct se_device *dev)
{
	int error;
	struct se_cmd *cmd = NULL;
	struct se_task *task = NULL;
	unsigned long flags;

	/*
	 * Check if there is enough room in the device and HBA queue to send
	 * struct se_tasks to the selected transport.
	 */
check_depth:
	if (!atomic_read(&dev->depth_left))
		return transport_tcq_window_closed(dev);

	dev->dev_tcq_window_closed = 0;

	spin_lock_irq(&dev->execute_task_lock);
	if (list_empty(&dev->execute_task_list)) {
		spin_unlock_irq(&dev->execute_task_lock);
		return 0;
	}
	task = list_first_entry(&dev->execute_task_list,
				struct se_task, t_execute_list);
	list_del(&task->t_execute_list);
	atomic_set(&task->task_execute_queue, 0);
	atomic_dec(&dev->execute_tasks);
	spin_unlock_irq(&dev->execute_task_lock);

	atomic_dec(&dev->depth_left);

	cmd = task->task_se_cmd;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	atomic_set(&task->task_active, 1);
	atomic_set(&task->task_sent, 1);
	atomic_inc(&cmd->t_task_cdbs_sent);

	if (atomic_read(&cmd->t_task_cdbs_sent) ==
	    cmd->t_task_list_num)
		atomic_set(&cmd->transport_sent, 1);

	transport_start_task_timer(task);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	/*
	 * The struct se_cmd->transport_emulate_cdb() function pointer is used
	 * to grab REPORT_LUNS and other CDBs we want to handle before they hit
	 * the struct se_subsystem_api->do_task() caller below.
	 */
	if (cmd->transport_emulate_cdb) {
		error = cmd->transport_emulate_cdb(cmd);
		if (error != 0) {
			cmd->transport_error_status = error;
			atomic_set(&task->task_active, 0);
			atomic_set(&cmd->transport_sent, 0);
			transport_stop_tasks_for_cmd(cmd);
			transport_generic_request_failure(cmd, dev, 0, 1);
			goto check_depth;
		}
		/*
		 * Handle the successful completion for transport_emulate_cdb()
		 * for synchronous operation, following SCF_EMULATE_CDB_ASYNC.
		 * Otherwise the caller is expected to complete the task with
		 * proper status.
		 */
		if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
			cmd->scsi_status = SAM_STAT_GOOD;
			task->task_scsi_status = GOOD;
			transport_complete_task(task, 1);
		}
	} else {
		/*
		 * Currently for all virtual TCM plugins including IBLOCK, FILEIO and
		 * RAMDISK we use the internal transport_emulate_control_cdb() logic
		 * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK
		 * LUN emulation code.
		 *
		 * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we
		 * call ->do_task() directly and let the underlying TCM subsystem plugin
		 * code handle the CDB emulation.
		 */
		if ((dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) &&
		    (!(task->task_se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
			error = transport_emulate_control_cdb(task);
		else
			error = dev->transport->do_task(task);

		if (error != 0) {
			cmd->transport_error_status = error;
			atomic_set(&task->task_active, 0);
			atomic_set(&cmd->transport_sent, 0);
			transport_stop_tasks_for_cmd(cmd);
			transport_generic_request_failure(cmd, dev, 0, 1);
		}
	}

	goto check_depth;

	return 0;
}

void transport_new_cmd_failure(struct se_cmd *se_cmd)
{
	unsigned long flags;
	/*
	 * Any unsolicited data will get dumped for failed command inside of
	 * the fabric plugin
	 */
	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
	se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
	se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
}

static void transport_nop_wait_for_tasks(struct se_cmd *, int, int);

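/*
 * transport_get_sectors_*() extract the transfer length encoded in each
 * fixed CDB size.  TYPE_TAPE uses a 24-bit allocation length for 6 and
 * 16 byte CDBs; the 10 and 12 byte variants are not defined in SSC and
 * fail with -EINVAL for tape devices.
 */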
static inline u32 transport_get_sectors_6(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 8-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * Use 24-bit allocation length for TYPE_TAPE.
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];

	/*
	 * Everything else assume TYPE_DISK Sector CDB location.
	 * Use 8-bit sector value.
	 */
type_disk:
	return (u32)cdb[4];
}

static inline u32 transport_get_sectors_10(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 16-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * XXX_10 is not defined in SSC, throw an exception
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
		*ret = -EINVAL;
		return 0;
	}

	/*
	 * Everything else assume TYPE_DISK Sector CDB location.
	 * Use 16-bit sector value.
	 */
type_disk:
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * XXX_12 is not defined in SSC, throw an exception
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
		*ret = -EINVAL;
		return 0;
	}

	/*
	 * Everything else assume TYPE_DISK Sector CDB location.
	 * Use 32-bit sector value.
	 */
type_disk:
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * Use 24-bit allocation length for TYPE_TAPE.
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];

type_disk:
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
		    (cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
		    (cdb[30] << 8) + cdb[31];

}

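/*
 * Convert a sector count into a byte count.  For TYPE_TAPE, cdb[1] bit 0
 * (the SSC FIXED bit) selects between block-sized records and a raw byte
 * count; all other device types scale by the backend block size.
 */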
static inline u32 transport_get_size(
	u32 sectors,
	unsigned char *cdb,
	struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
		if (cdb[1] & 1) { /* sectors */
			return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
		} else /* bytes */
			return sectors;
	}
#if 0
	pr_debug("Returning block_size: %u, sectors: %u == %u for"
			" %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors,
			dev->se_sub_dev->se_dev_attrib.block_size * sectors,
			dev->transport->name);
#endif
	return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
}

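/*
 * BIDI completion callback for the emulated XDWRITEREAD_10/32 paths:
 * XORs the transferred data-out payload into the blocks just read,
 * leaving the result in the BIDI data-in scatterlist.
 */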
static void transport_xor_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	int i;
	int count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg
	 */

	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg), KM_USER0);
		if (!addr)
			goto out;

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr, KM_USER0);
	}

out:
	kfree(buf);
}

/*
 * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
 */
static int transport_get_sense_data(struct se_cmd *cmd)
{
	unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
	struct se_device *dev;
	struct se_task *task = NULL, *task_tmp;
	unsigned long flags;
	u32 offset = 0;

	WARN_ON(!cmd->se_lun);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}

	list_for_each_entry_safe(task, task_tmp,
				&cmd->t_task_list, t_list) {

		if (!task->task_sense)
			continue;

		dev = task->se_dev;
		if (!dev)
			continue;

		if (!dev->transport->get_sense_buffer) {
			pr_err("dev->transport->get_sense_buffer"
					" is NULL\n");
			continue;
		}

		sense_buffer = dev->transport->get_sense_buffer(task);
		if (!sense_buffer) {
			pr_err("ITT[0x%08x]_TASK[%d]: Unable to locate"
				" sense buffer for task with sense\n",
				cmd->se_tfo->get_task_tag(cmd), task->task_no);
			continue;
		}
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		offset = cmd->se_tfo->set_fabric_sense_len(cmd,
				TRANSPORT_SENSE_BUFFER);

		memcpy(&buffer[offset], sense_buffer,
				TRANSPORT_SENSE_BUFFER);
		cmd->scsi_status = task->task_scsi_status;
		/* Automatically padded */
		cmd->scsi_sense_length =
				(TRANSPORT_SENSE_BUFFER + offset);

		pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
				" and sense\n",
			dev->se_hba->hba_id, dev->transport->name,
				cmd->scsi_status);
		return 0;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return -1;
}

static int
transport_handle_reservation_conflict(struct se_cmd *cmd)
{
	cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
	cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
	/*
	 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
	 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
	 * CONFLICT STATUS.
	 *
	 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
	 */
	if (cmd->se_sess &&
	    cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
		core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
			cmd->orig_fe_lun, 0x2C,
			ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
	return -EINVAL;
}

static inline long long transport_dev_end_lba(struct se_device *dev)
{
	return dev->transport->get_blocks(dev) + 1;
}

static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	u32 sectors;

	if (dev->transport->get_device_type(dev) != TYPE_DISK)
		return 0;

	sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);

	if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
		pr_err("LBA: %llu Sectors: %u exceeds"
			" transport_dev_end_lba(): %llu\n",
			cmd->t_task_lba, sectors,
			transport_dev_end_lba(dev));
		return -EINVAL;
	}

	return 0;
}

static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
{
	/*
	 * Determine if the received WRITE_SAME is used for direct
	 * passthrough into Linux/SCSI with struct request via TCM/pSCSI
	 * or we are signaling the use of internal WRITE_SAME + UNMAP=1
	 * emulation for -> Linux/BLOCK discard with TCM/IBLOCK code.
	 */
	int passthrough = (dev->transport->transport_type ==
				TRANSPORT_PLUGIN_PHBA_PDEV);

	if (!passthrough) {
		if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
			pr_err("WRITE_SAME PBDATA and LBDATA"
				" bits not supported for Block Discard"
				" Emulation\n");
			return -ENOSYS;
		}
		/*
		 * Currently for the emulated case we only accept
		 * tpws with the UNMAP=1 bit set.
		 */
		if (!(flags[0] & 0x08)) {
			pr_err("WRITE_SAME w/o UNMAP bit not"
				" supported for Block Discard Emulation\n");
			return -ENOSYS;
		}
	}

	return 0;
}

/*	transport_generic_cmd_sequencer():
 *
 *	Generic Command Sequencer that should work for most DAS transport
 *	drivers.
 *
 *	Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
 *	RX Thread.
 *
 *	FIXME: Need to support other SCSI OPCODES as well.
 */
static int transport_generic_cmd_sequencer(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	struct se_device *dev = cmd->se_dev;
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	int ret = 0, sector_ret = 0, passthrough;
	u32 sectors = 0, size = 0, pr_reg_type = 0;
	u16 service_action;
	u8 alua_ascq = 0;
	/*
	 * Check for an existing UNIT ATTENTION condition
	 */
	if (core_scsi3_ua_check(cmd, cdb) < 0) {
		cmd->transport_wait_for_tasks =
				&transport_nop_wait_for_tasks;
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
		return -EINVAL;
	}
	/*
	 * Check status of Asymmetric Logical Unit Assignment port
	 */
	ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
	if (ret != 0) {
		cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
		/*
		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
		 * The ALUA additional sense code qualifier (ASCQ) is determined
		 * by the ALUA primary or secondary access state.
		 */
		if (ret > 0) {
#if 0
			pr_debug("[%s]: ALUA TG Port not available,"
				" SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
				cmd->se_tfo->get_fabric_name(), alua_ascq);
#endif
			transport_set_sense_codes(cmd, 0x04, alua_ascq);
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
			return -EINVAL;
		}
		goto out_invalid_cdb_field;
	}
	/*
	 * Check status for SPC-3 Persistent Reservations
	 */
	if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
		if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
					cmd, cdb, pr_reg_type) != 0)
			return transport_handle_reservation_conflict(cmd);
		/*
		 * This means the CDB is allowed for the SCSI Initiator port
		 * when said port is *NOT* holding the legacy SPC-2 or
		 * SPC-3 Persistent Reservation.
		 */
	}

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->transport_split_cdb = &split_cdb_XX_6;
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->transport_split_cdb = &split_cdb_XX_10;
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->transport_split_cdb = &split_cdb_XX_12;
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->transport_split_cdb = &split_cdb_XX_16;
		cmd->t_task_lba = transport_lba_64(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->transport_split_cdb = &split_cdb_XX_6;
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_10:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->transport_split_cdb = &split_cdb_XX_10;
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->t_tasks_fua = (cdb[1] & 0x8);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->transport_split_cdb = &split_cdb_XX_12;
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->t_tasks_fua = (cdb[1] & 0x8);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->transport_split_cdb = &split_cdb_XX_16;
		cmd->t_task_lba = transport_lba_64(cdb);
		cmd->t_tasks_fua = (cdb[1] & 0x8);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case XDWRITEREAD_10:
		if ((cmd->data_direction != DMA_TO_DEVICE) ||
		    !(cmd->t_tasks_bidi))
			goto out_invalid_cdb_field;
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->transport_split_cdb = &split_cdb_XX_10;
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		passthrough = (dev->transport->transport_type ==
				TRANSPORT_PLUGIN_PHBA_PDEV);
		/*
		 * Skip the remaining assignments for TCM/PSCSI passthrough
		 */
		if (passthrough)
			break;
		/*
		 * Setup BIDI XOR callback to be run during transport_generic_complete_ok()
		 */
		cmd->transport_complete_callback = &transport_xor_callback;
		cmd->t_tasks_fua = (cdb[1] & 0x8);
		break;
	case VARIABLE_LENGTH_CMD:
		service_action = get_unaligned_be16(&cdb[8]);
		/*
		 * Determine if this is TCM/PSCSI device and we should disable
		 * internal emulation for this CDB.
		 */
		passthrough = (dev->transport->transport_type ==
					TRANSPORT_PLUGIN_PHBA_PDEV);

		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
			if (sector_ret)
				goto out_unsupported_cdb;
			size = transport_get_size(sectors, cdb, cmd);
			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->transport_split_cdb = &split_cdb_XX_32;
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;

			/*
			 * Skip the remaining assignments for TCM/PSCSI passthrough
			 */
			if (passthrough)
				break;

			/*
			 * Setup BIDI XOR callback to be run during
			 * transport_generic_complete_ok()
			 */
			cmd->transport_complete_callback = &transport_xor_callback;
			cmd->t_tasks_fua = (cdb[10] & 0x8);
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
			if (sector_ret)
				goto out_unsupported_cdb;

			if (sectors)
				size = transport_get_size(1, cdb, cmd);
			else {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				goto out_invalid_cdb_field;
			}

			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
			cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;

			if (target_check_write_same_discard(&cdb[10], dev) < 0)
				goto out_invalid_cdb_field;

			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
				" 0x%04x not supported\n", service_action);
			goto out_unsupported_cdb;
		}
		break;
	case MAINTENANCE_IN:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/* MAINTENANCE_IN from SCC-2 */
			/*
			 * Check for emulated MI_REPORT_TARGET_PGS.
			 */
			if (cdb[1] == MI_REPORT_TARGET_PGS) {
				cmd->transport_emulate_cdb =
				(su_dev->t10_alua.alua_type ==
				 SPC3_ALUA_EMULATED) ?
				core_emulate_report_target_port_groups :
				NULL;
			}
			size = (cdb[6] << 24) | (cdb[7] << 16) |
			       (cdb[8] << 8) | cdb[9];
		} else {
			/* GPCMD_SEND_KEY from multi media commands */
			size = (cdb[8] << 8) + cdb[9];
		}
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MODE_SELECT:
		size = cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MODE_SELECT_10:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MODE_SENSE:
		size = cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MODE_SENSE_10:
	case GPCMD_READ_BUFFER_CAPACITY:
	case GPCMD_SEND_OPC:
	case LOG_SELECT:
	case LOG_SENSE:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case READ_BLOCK_LIMITS:
		size = READ_BLOCK_LEN;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case GPCMD_GET_CONFIGURATION:
	case GPCMD_READ_FORMAT_CAPACITIES:
	case GPCMD_READ_DISC_INFO:
	case GPCMD_READ_TRACK_RZONE_INFO:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case PERSISTENT_RESERVE_IN:
	case PERSISTENT_RESERVE_OUT:
		cmd->transport_emulate_cdb =
			(su_dev->t10_pr.res_type ==
			 SPC3_PERSISTENT_RESERVATIONS) ?
			core_scsi3_emulate_pr : NULL;
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case GPCMD_MECHANISM_STATUS:
	case GPCMD_READ_DVD_STRUCTURE:
		size = (cdb[8] << 8) + cdb[9];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case READ_POSITION:
		size = READ_POSITION_LEN;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MAINTENANCE_OUT:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/* MAINTENANCE_OUT from SCC-2
			 *
			 * Check for emulated MO_SET_TARGET_PGS.
			 */
			if (cdb[1] == MO_SET_TARGET_PGS) {
				cmd->transport_emulate_cdb =
				(su_dev->t10_alua.alua_type ==
					SPC3_ALUA_EMULATED) ?
				core_emulate_set_target_port_groups :
				NULL;
			}

			size = (cdb[6] << 24) | (cdb[7] << 16) |
			       (cdb[8] << 8) | cdb[9];
		} else {
			/* GPCMD_REPORT_KEY from multi media commands */
			size = (cdb[8] << 8) + cdb[9];
		}
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case INQUIRY:
		size = (cdb[3] << 8) + cdb[4];
		/*
		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
		 * See spc4r17 section 5.3
		 */
		if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
			cmd->sam_task_attr = MSG_HEAD_TAG;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case READ_BUFFER:
		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case READ_MEDIA_SERIAL_NUMBER:
	case SECURITY_PROTOCOL_IN:
	case SECURITY_PROTOCOL_OUT:
		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case SERVICE_ACTION_IN:
	case ACCESS_CONTROL_IN:
	case ACCESS_CONTROL_OUT:
	case EXTENDED_COPY:
	case READ_ATTRIBUTE:
	case RECEIVE_COPY_RESULTS:
	case WRITE_ATTRIBUTE:
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case RECEIVE_DIAGNOSTIC:
	case SEND_DIAGNOSTIC:
		size = (cdb[3] << 8) | cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
#if 0
	case GPCMD_READ_CD:
		sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		size = (2336 * sectors);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
#endif
	case READ_TOC:
		size = cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case REQUEST_SENSE:
		size = cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case READ_ELEMENT_STATUS:
		size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case WRITE_BUFFER:
		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case RESERVE:
	case RESERVE_10:
		/*
		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
		 */
		if (cdb[0] == RESERVE_10)
			size = (cdb[7] << 8) | cdb[8];
		else
			size = cmd->data_length;

		/*
		 * Setup the legacy emulated handler for SPC-2 and
		 * >= SPC-3 compatible reservation handling (CRH=1)
		 * Otherwise, we assume the underlying SCSI logic is
		 * running in SPC_PASSTHROUGH, and wants reservations
		 * emulation disabled.
		 */
		cmd->transport_emulate_cdb =
				(su_dev->t10_pr.res_type !=
				 SPC_PASSTHROUGH) ?
				core_scsi2_emulate_crh : NULL;
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case RELEASE:
	case RELEASE_10:
		/*
		 * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
		 */
		if (cdb[0] == RELEASE_10)
			size = (cdb[7] << 8) | cdb[8];
		else
			size = cmd->data_length;

		cmd->transport_emulate_cdb =
				(su_dev->t10_pr.res_type !=
				 SPC_PASSTHROUGH) ?
				core_scsi2_emulate_crh : NULL;
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case SYNCHRONIZE_CACHE:
	case 0x91: /* SYNCHRONIZE_CACHE_16: */
		/*
		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
		 */
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
			cmd->t_task_lba = transport_lba_64(cdb);
		}
		if (sector_ret)
			goto out_unsupported_cdb;

		size = transport_get_size(sectors, cdb, cmd);
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;

		/*
		 * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb()
		 */
		if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
			break;
		/*
		 * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation
		 * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks()
		 */
		cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC;
		/*
		 * Check to ensure that LBA + Range does not exceed the end of
		 * the device for IBLOCK and FILEIO ->do_sync_cache() backend calls
		 */
		if ((cmd->t_task_lba != 0) || (sectors != 0)) {
			if (transport_cmd_get_valid_sectors(cmd) < 0)
				goto out_invalid_cdb_field;
		}
		break;
	case UNMAP:
		size = get_unaligned_be16(&cdb[7]);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;

		if (sectors)
			size = transport_get_size(1, cdb, cmd);
		else {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			goto out_invalid_cdb_field;
		}

		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;

		if (target_check_write_same_discard(&cdb[1], dev) < 0)
			goto out_invalid_cdb_field;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;

		if (sectors)
			size = transport_get_size(1, cdb, cmd);
		else {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			goto out_invalid_cdb_field;
		}

		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of original reserved field
		 */
		if (target_check_write_same_discard(&cdb[1], dev) < 0)
			goto out_invalid_cdb_field;
		break;
	case ALLOW_MEDIUM_REMOVAL:
	case GPCMD_CLOSE_TRACK:
	case ERASE:
	case INITIALIZE_ELEMENT_STATUS:
	case GPCMD_LOAD_UNLOAD:
	case REZERO_UNIT:
	case SEEK_10:
	case GPCMD_SET_SPEED:
	case SPACE:
	case START_STOP:
	case TEST_UNIT_READY:
	case VERIFY:
	case WRITE_FILEMARKS:
	case MOVE_MEDIUM:
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case REPORT_LUNS:
		cmd->transport_emulate_cdb =
				transport_core_report_lun_response;
		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		/*
		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
		 * See spc4r17 section 5.3
		 */
		if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
			cmd->sam_task_attr = MSG_HEAD_TAG;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	default:
		pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
			" 0x%02x, sending CHECK_CONDITION.\n",
			cmd->se_tfo->get_fabric_name(), cdb[0]);
		cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
		goto out_unsupported_cdb;
	}

	if (size != cmd->data_length) {
		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
				cmd->data_length, size, cdb[0]);

		cmd->cmd_spdtl = size;

		if (cmd->data_direction == DMA_TO_DEVICE) {
			pr_err("Rejecting underflow/overflow"
					" WRITE data\n");
			goto out_invalid_cdb_field;
		}
		/*
		 * Reject READ_* or WRITE_* with overflow/underflow for
		 * type SCF_SCSI_DATA_SG_IO_CDB.
		 */
		if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) {
			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
				" CDB on non 512-byte sector setup subsystem"
				" plugin: %s\n", dev->transport->name);
			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
			goto out_invalid_cdb_field;
		}

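		/*
		 * Record SAM residual state: OVERFLOW when the CDB-derived
		 * size exceeds the fabric data_length, UNDERFLOW when it is
		 * smaller, and continue processing with the CDB-derived size.
		 */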
		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
		}
		cmd->data_length = size;
	}

	/* Let's limit control cdbs to a page, for simplicity's sake. */
	if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
	    size > PAGE_SIZE)
		goto out_invalid_cdb_field;

	transport_set_supported_SAM_opcode(cmd);
	return ret;

out_unsupported_cdb:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
	return -EINVAL;
out_invalid_cdb_field:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
	return -EINVAL;
}

/*
 * Called from transport_generic_complete_ok() and
 * transport_generic_request_failure() to determine which dormant/delayed
 * and ordered cmds need to have their tasks added to the execution queue.
 */
static void transport_complete_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_cmd *cmd_p, *cmd_tmp;
	int new_active_tasks = 0;

	if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
		atomic_dec(&dev->simple_cmds);
		smp_mb__after_atomic_dec();
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
			cmd->se_ordered_id);
	} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
		atomic_dec(&dev->dev_hoq_count);
		smp_mb__after_atomic_dec();
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for"
			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
			cmd->se_ordered_id);
	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
		spin_lock(&dev->ordered_cmd_lock);
		list_del(&cmd->se_ordered_node);
		atomic_dec(&dev->dev_ordered_sync);
		smp_mb__after_atomic_dec();
		spin_unlock(&dev->ordered_cmd_lock);

		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
			" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
	}
	/*
	 * Process all commands up to the last received
	 * ORDERED task attribute which requires another blocking
	 * boundary
	 */
	spin_lock(&dev->delayed_cmd_lock);
	list_for_each_entry_safe(cmd_p, cmd_tmp,
			&dev->delayed_cmd_list, se_delayed_node) {

		list_del(&cmd_p->se_delayed_node);
		spin_unlock(&dev->delayed_cmd_lock);

		pr_debug("Calling add_tasks() for"
			" cmd_p: 0x%02x Task Attr: 0x%02x"
			" Dormant -> Active, se_ordered_id: %u\n",
			cmd_p->t_task_cdb[0],
			cmd_p->sam_task_attr, cmd_p->se_ordered_id);

		transport_add_tasks_from_cmd(cmd_p);
		new_active_tasks++;

		spin_lock(&dev->delayed_cmd_lock);
		if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
			break;
	}
	spin_unlock(&dev->delayed_cmd_lock);
	/*
	 * If new tasks have become active, wake up the transport thread
	 * to do the processing of the Active tasks.
	 */
	if (new_active_tasks != 0)
		wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
}
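
/*
 * Illustrative, non-compiled timeline for the delayed-list walk above,
 * with hypothetical commands on dev->delayed_cmd_list (head first):
 */
#if 0
	/* delayed_cmd_list: [SIMPLE_1, SIMPLE_2, ORDERED_3, SIMPLE_4]
	 *
	 *   SIMPLE_1  -> transport_add_tasks_from_cmd(), Active
	 *   SIMPLE_2  -> transport_add_tasks_from_cmd(), Active
	 *   ORDERED_3 -> transport_add_tasks_from_cmd(), loop breaks here
	 *   SIMPLE_4  -> stays dormant behind the new ORDERED boundary
	 */
#endif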

static int transport_complete_qf(struct se_cmd *cmd)
{
	int ret = 0;

	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
		return cmd->se_tfo->queue_status(cmd);

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		ret = cmd->se_tfo->queue_data_in(cmd);
		break;
	case DMA_TO_DEVICE:
		if (cmd->t_bidi_data_sg) {
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret < 0)
				return ret;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		ret = cmd->se_tfo->queue_status(cmd);
		break;
	default:
		break;
	}

	return ret;
}

static void transport_handle_queue_full(
	struct se_cmd *cmd,
	struct se_device *dev,
	int (*qf_callback)(struct se_cmd *))
{
	spin_lock_irq(&dev->qf_cmd_lock);
	cmd->se_cmd_flags |= SCF_EMULATE_QUEUE_FULL;
	cmd->transport_qf_callback = qf_callback;
	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
	atomic_inc(&dev->dev_qf_count);
	smp_mb__after_atomic_inc();
	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);

	schedule_work(&cmd->se_dev->qf_work_queue);
}
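
/*
 * Non-compiled sketch of the QUEUE_FULL round trip set up above: a
 * fabric callback returning -EAGAIN parks the command on
 * dev->qf_cmd_list, and dev->qf_work_queue later replays it through
 * the saved qf_callback (here transport_complete_qf).
 */
#if 0
	ret = cmd->se_tfo->queue_data_in(cmd);
	if (ret == -EAGAIN)
		transport_handle_queue_full(cmd, cmd->se_dev,
				transport_complete_qf);
#endif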

static void transport_generic_complete_ok(struct se_cmd *cmd)
{
	int reason = 0, ret;
	/*
	 * Check if we need to move delayed/dormant tasks from cmds on the
	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
	 * Attribute.
	 */
	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);
	/*
	 * Check to schedule QUEUE_FULL work, or execute an existing
	 * cmd->transport_qf_callback()
	 */
	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
		schedule_work(&cmd->se_dev->qf_work_queue);

	if (cmd->transport_qf_callback) {
		ret = cmd->transport_qf_callback(cmd);
		if (ret < 0)
			goto queue_full;

		cmd->transport_qf_callback = NULL;
		goto done;
	}
	/*
	 * Check if we need to retrieve a sense buffer from
	 * the struct se_cmd in question.
	 */
	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		if (transport_get_sense_data(cmd) < 0)
			reason = TCM_NON_EXISTENT_LUN;

		/*
		 * Only set when a struct se_task->task_scsi_status returned
		 * a non GOOD status.
		 */
		if (cmd->scsi_status) {
			ret = transport_send_check_condition_and_sense(
					cmd, reason, 1);
			if (ret == -EAGAIN)
				goto queue_full;

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop_to_fabric(cmd);
			return;
		}
	}
	/*
	 * Check for a callback, used by, amongst other things,
	 * XDWRITE_READ_10 emulation.
	 */
	if (cmd->transport_complete_callback)
		cmd->transport_complete_callback(cmd);

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		spin_lock(&cmd->se_lun->lun_sep_lock);
		if (cmd->se_lun->lun_sep) {
			cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
					cmd->data_length;
		}
		spin_unlock(&cmd->se_lun->lun_sep_lock);

		ret = cmd->se_tfo->queue_data_in(cmd);
		if (ret == -EAGAIN)
			goto queue_full;
		break;
	case DMA_TO_DEVICE:
		spin_lock(&cmd->se_lun->lun_sep_lock);
		if (cmd->se_lun->lun_sep) {
			cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
				cmd->data_length;
		}
		spin_unlock(&cmd->se_lun->lun_sep_lock);
		/*
		 * Check if we need to send READ payload for BIDI-COMMAND
		 */
		if (cmd->t_bidi_data_sg) {
			spin_lock(&cmd->se_lun->lun_sep_lock);
			if (cmd->se_lun->lun_sep) {
				cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
					cmd->data_length;
			}
			spin_unlock(&cmd->se_lun->lun_sep_lock);
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret == -EAGAIN)
				goto queue_full;
			break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN)
			goto queue_full;
		break;
	default:
		break;
	}

done:
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
		" data_direction: %d\n", cmd, cmd->data_direction);
	transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf);
}

static void transport_free_dev_tasks(struct se_cmd *cmd)
{
	struct se_task *task, *task_tmp;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_for_each_entry_safe(task, task_tmp,
				&cmd->t_task_list, t_list) {
		if (atomic_read(&task->task_active))
			continue;

		kfree(task->task_sg_bidi);
		kfree(task->task_sg);

		list_del(&task->t_list);

		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		if (task->se_dev)
			task->se_dev->transport->free_task(task);
		else
			pr_err("task[%u] - task->se_dev is NULL\n",
				task->task_no);
		spin_lock_irqsave(&cmd->t_state_lock, flags);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, nents, count)
		__free_page(sg_page(sg));

	kfree(sgl);
}

static inline void transport_free_pages(struct se_cmd *cmd)
{
	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
		return;

	transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
	cmd->t_data_sg = NULL;
	cmd->t_data_nents = 0;

	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
	cmd->t_bidi_data_sg = NULL;
	cmd->t_bidi_data_nents = 0;
}

static inline void transport_release_tasks(struct se_cmd *cmd)
{
	transport_free_dev_tasks(cmd);
}

static inline int transport_dec_and_check(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (atomic_read(&cmd->t_fe_count)) {
		if (!atomic_dec_and_test(&cmd->t_fe_count)) {
			spin_unlock_irqrestore(&cmd->t_state_lock,
					flags);
			return 1;
		}
	}

	if (atomic_read(&cmd->t_se_count)) {
		if (!atomic_dec_and_test(&cmd->t_se_count)) {
			spin_unlock_irqrestore(&cmd->t_state_lock,
					flags);
			return 1;
		}
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return 0;
}

static void transport_release_fe_cmd(struct se_cmd *cmd)
{
	unsigned long flags;

	if (transport_dec_and_check(cmd))
		return;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (!atomic_read(&cmd->transport_dev_active)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		goto free_pages;
	}
	atomic_set(&cmd->transport_dev_active, 0);
	transport_all_task_dev_remove_state(cmd);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	transport_release_tasks(cmd);
free_pages:
	transport_free_pages(cmd);
	transport_release_cmd(cmd);
}

static int
transport_generic_remove(struct se_cmd *cmd, int session_reinstatement)
{
	unsigned long flags;

	if (transport_dec_and_check(cmd)) {
		if (session_reinstatement) {
			spin_lock_irqsave(&cmd->t_state_lock, flags);
			transport_all_task_dev_remove_state(cmd);
			spin_unlock_irqrestore(&cmd->t_state_lock,
					flags);
		}
		return 1;
	}

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (!atomic_read(&cmd->transport_dev_active)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		goto free_pages;
	}
	atomic_set(&cmd->transport_dev_active, 0);
	transport_all_task_dev_remove_state(cmd);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	transport_release_tasks(cmd);

free_pages:
	transport_free_pages(cmd);
	transport_release_cmd(cmd);
	return 0;
}

/*
 * transport_generic_map_mem_to_cmd - Use fabric-allocated pages instead of
 * allocating in the core.
 * @cmd:  Associated se_cmd descriptor
 * @sgl:  SGL style memory for TCM WRITE / READ
 * @sgl_count: Number of SGL elements
 * @sgl_bidi: SGL style memory for TCM BIDI READ
 * @sgl_bidi_count: Number of BIDI READ SGL elements
 *
 * Return: nonzero if cmd was rejected for -ENOMEM or improper usage
 * of parameters.
 */
int transport_generic_map_mem_to_cmd(
	struct se_cmd *cmd,
	struct scatterlist *sgl,
	u32 sgl_count,
	struct scatterlist *sgl_bidi,
	u32 sgl_bidi_count)
{
	if (!sgl || !sgl_count)
		return 0;

	if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
	    (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {

		cmd->t_data_sg = sgl;
		cmd->t_data_nents = sgl_count;

		if (sgl_bidi && sgl_bidi_count) {
			cmd->t_bidi_data_sg = sgl_bidi;
			cmd->t_bidi_data_nents = sgl_bidi_count;
		}
		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	}

	return 0;
}
EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
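
/*
 * Minimal usage sketch (not compiled; my_sgl/my_nents are hypothetical
 * fabric-side names): hand fabric-allocated SGL memory to the core so
 * transport_generic_new_cmd() skips transport_generic_get_mem().
 */
#if 0
	ret = transport_generic_map_mem_to_cmd(se_cmd, my_sgl, my_nents,
					       NULL, 0);
	if (ret < 0)
		return ret;
	/* SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is now set on se_cmd */
#endif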

static int transport_new_cmd_obj(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	int set_counts = 1, rc, task_cdbs;

	/*
	 * Setup any BIDI READ tasks and memory from
	 * cmd->t_bidi_data_sg so the READ struct se_tasks
	 * are queued first for the non pSCSI passthrough case.
	 */
	if (cmd->t_bidi_data_sg &&
	    (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
		rc = transport_allocate_tasks(cmd,
					      cmd->t_task_lba,
					      DMA_FROM_DEVICE,
					      cmd->t_bidi_data_sg,
					      cmd->t_bidi_data_nents);
		if (rc <= 0) {
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason =
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
			return -EINVAL;
		}
		atomic_inc(&cmd->t_fe_count);
		atomic_inc(&cmd->t_se_count);
		set_counts = 0;
	}
	/*
	 * Setup the tasks and memory from cmd->t_data_sg.
	 * Note for BIDI transfers this will contain the WRITE payload
	 */
	task_cdbs = transport_allocate_tasks(cmd,
					     cmd->t_task_lba,
					     cmd->data_direction,
					     cmd->t_data_sg,
					     cmd->t_data_nents);
	if (task_cdbs <= 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason =
			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return -EINVAL;
	}

	if (set_counts) {
		atomic_inc(&cmd->t_fe_count);
		atomic_inc(&cmd->t_se_count);
	}

	cmd->t_task_list_num = task_cdbs;

	atomic_set(&cmd->t_task_cdbs_left, task_cdbs);
	atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs);
	atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs);
	return 0;
}

void *transport_kmap_first_data_page(struct se_cmd *cmd)
{
	struct scatterlist *sg = cmd->t_data_sg;

	BUG_ON(!sg);
	/*
	 * We need to take into account a possible offset here for fabrics like
	 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
	 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
	 */
	return kmap(sg_page(sg)) + sg->offset;
}
EXPORT_SYMBOL(transport_kmap_first_data_page);

void transport_kunmap_first_data_page(struct se_cmd *cmd)
{
	kunmap(sg_page(cmd->t_data_sg));
}
EXPORT_SYMBOL(transport_kunmap_first_data_page);
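
/*
 * Non-compiled usage sketch: patching the first byte of a payload page,
 * in the style of the emulated INQUIRY handlers (buf is hypothetical).
 */
#if 0
	unsigned char *buf;

	buf = transport_kmap_first_data_page(cmd);
	buf[0] = dev->transport->get_device_type(dev);
	transport_kunmap_first_data_page(cmd);
#endif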

static int
transport_generic_get_mem(struct se_cmd *cmd)
{
	u32 length = cmd->data_length;
	unsigned int nents;
	struct page *page;
	int i = 0;

	nents = DIV_ROUND_UP(length, PAGE_SIZE);
	cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
	if (!cmd->t_data_sg)
		return -ENOMEM;

	cmd->t_data_nents = nents;
	sg_init_table(cmd->t_data_sg, nents);

	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			goto out;

		sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}
	return 0;

out:
	/* Unwind only the entries that were successfully allocated */
	while (i > 0) {
		i--;
		__free_page(sg_page(&cmd->t_data_sg[i]));
	}
	kfree(cmd->t_data_sg);
	cmd->t_data_sg = NULL;
	return -ENOMEM;
}
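
/*
 * Worked example for the allocation above (not compiled): a 9000 byte
 * cmd->data_length on a 4K PAGE_SIZE system yields
 * nents = DIV_ROUND_UP(9000, 4096) = 3, with sg entry lengths
 * 4096, 4096 and 808.
 */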

/* Reduce sectors if they are too long for the device */
static inline sector_t transport_limit_task_sectors(
	struct se_device *dev,
	unsigned long long lba,
	sector_t sectors)
{
	sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);

	if (dev->transport->get_device_type(dev) == TYPE_DISK)
		if ((lba + sectors) > transport_dev_end_lba(dev))
			sectors = ((transport_dev_end_lba(dev) - lba) + 1);

	return sectors;
}
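
/*
 * Worked example (not compiled; max_sectors = 1024 and a last LBA of
 * 2047 are assumed): a request for 4096 sectors at lba 1536 is first
 * clamped to 1024 by max_sectors, then to (2047 - 1536) + 1 = 512 by
 * the end-of-device check.
 */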


/*
 * This function can be used by HW target mode drivers to create a linked
 * scatterlist from all contiguously allocated struct se_task->task_sg[].
 * This is intended to be called during the completion path by TCM Core
 * when struct target_core_fabric_ops->check_task_sg_chaining is enabled.
 */
void transport_do_task_sg_chain(struct se_cmd *cmd)
{
	struct scatterlist *sg_first = NULL;
	struct scatterlist *sg_prev = NULL;
	int sg_prev_nents = 0;
	struct scatterlist *sg;
	struct se_task *task;
	u32 chained_nents = 0;
	int i;

	BUG_ON(!cmd->se_tfo->task_sg_chaining);

	/*
	 * Walk the struct se_task list and setup scatterlist chains
	 * for each contiguously allocated struct se_task->task_sg[].
	 */
	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		if (!task->task_sg)
			continue;

		if (!sg_first) {
			sg_first = task->task_sg;
			chained_nents = task->task_sg_nents;
		} else {
			sg_chain(sg_prev, sg_prev_nents, task->task_sg);
			chained_nents += task->task_sg_nents;
		}
		/*
		 * For the padded tasks, use the extra SGL vector allocated
		 * in transport_allocate_data_tasks() for the sg_prev_nents
		 * offset into sg_chain() above.  The last task of a
		 * multi-task list, or a single task, will not have
		 * task->task_padded_sg set.
		 */
		if (task->task_padded_sg)
			sg_prev_nents = (task->task_sg_nents + 1);
		else
			sg_prev_nents = task->task_sg_nents;

		sg_prev = task->task_sg;
	}
	/*
	 * Setup the starting pointer and total t_tasks_sg_linked_no including
	 * padding SGs for linking and to mark the end.
	 */
	cmd->t_tasks_sg_chained = sg_first;
	cmd->t_tasks_sg_chained_no = chained_nents;

	pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
		" t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
		cmd->t_tasks_sg_chained_no);

	for_each_sg(cmd->t_tasks_sg_chained, sg,
			cmd->t_tasks_sg_chained_no, i) {

		pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
			i, sg, sg_page(sg), sg->length, sg->offset);
		if (sg_is_chain(sg))
			pr_debug("SG: %p sg_is_chain=1\n", sg);
		if (sg_is_last(sg))
			pr_debug("SG: %p sg_is_last=1\n", sg);
	}
}
EXPORT_SYMBOL(transport_do_task_sg_chain);
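
/*
 * Non-compiled sketch of the intended caller: a HW target driver with
 * se_tfo->task_sg_chaining enabled walks one chained SGL per command
 * instead of one SGL per task (sg and i are hypothetical locals).
 */
#if 0
	transport_do_task_sg_chain(se_cmd);

	for_each_sg(se_cmd->t_tasks_sg_chained, sg,
		    se_cmd->t_tasks_sg_chained_no, i) {
		/* program sg_page(sg) + sg->length into a HW descriptor */
	}
#endif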

/*
 * Break up cmd into chunks transport can handle
 */
static int transport_allocate_data_tasks(
	struct se_cmd *cmd,
	unsigned long long lba,
	enum dma_data_direction data_direction,
	struct scatterlist *sgl,
	unsigned int sgl_nents)
{
	unsigned char *cdb = NULL;
	struct se_task *task;
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;
	int task_count, i, ret;
	sector_t sectors, dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
	u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
	struct scatterlist *sg;
	struct scatterlist *cmd_sg;

	WARN_ON(cmd->data_length % sector_size);
	sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
	task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);

	cmd_sg = sgl;
	for (i = 0; i < task_count; i++) {
		unsigned int task_size, task_sg_nents_padded;
		int count;

		task = transport_generic_get_task(cmd, data_direction);
		if (!task)
			return -ENOMEM;

		task->task_lba = lba;
		task->task_sectors = min(sectors, dev_max_sectors);
		task->task_size = task->task_sectors * sector_size;

		cdb = dev->transport->get_cdb(task);
		BUG_ON(!cdb);

		memcpy(cdb, cmd->t_task_cdb,
		       scsi_command_size(cmd->t_task_cdb));

		/* Update new cdb with updated lba/sectors */
		cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb);
		/*
		 * This now assumes that passed sg_ents are in PAGE_SIZE chunks
		 * in order to calculate the number per task SGL entries
		 */
		task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);
		/*
		 * Check if the fabric module driver is requesting that all
		 * struct se_task->task_sg[] be chained together.  If so,
		 * then allocate an extra padding SG entry for linking and
		 * marking the end of the chained SGL for every task except
		 * the last one for (task_count > 1) operation, or skipping
		 * the extra padding for the (task_count == 1) case.
		 */
		if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
			task_sg_nents_padded = (task->task_sg_nents + 1);
			task->task_padded_sg = 1;
		} else
			task_sg_nents_padded = task->task_sg_nents;

		task->task_sg = kmalloc(sizeof(struct scatterlist) *
					task_sg_nents_padded, GFP_KERNEL);
		if (!task->task_sg) {
			cmd->se_dev->transport->free_task(task);
			return -ENOMEM;
		}

		sg_init_table(task->task_sg, task_sg_nents_padded);

		task_size = task->task_size;

		/* Build new sgl, only up to task_size */
		for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
			if (cmd_sg->length > task_size)
				break;

			*sg = *cmd_sg;
			task_size -= cmd_sg->length;
			cmd_sg = sg_next(cmd_sg);
		}

		lba += task->task_sectors;
		sectors -= task->task_sectors;

		spin_lock_irqsave(&cmd->t_state_lock, flags);
		list_add_tail(&task->t_list, &cmd->t_task_list);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	}
	/*
	 * Now perform the memory map of task->task_sg[] into backend
	 * subsystem memory.
	 */
	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		if (atomic_read(&task->task_sent))
			continue;
		if (!dev->transport->map_data_SG)
			continue;

		ret = dev->transport->map_data_SG(task);
		if (ret < 0)
			return 0;
	}

	return task_count;
}
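
/*
 * Worked example for the split above (not compiled): 1 MB of data at a
 * 512 byte block_size gives sectors = 2048; with max_sectors = 1024,
 * task_count = DIV_ROUND_UP_SECTOR_T(2048, 1024) = 2, so two se_tasks
 * each cover 1024 sectors (512 KB) of the passed SGL.
 */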

static int
transport_allocate_control_task(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb;
	struct se_task *task;
	unsigned long flags;
	int ret = 0;

	task = transport_generic_get_task(cmd, cmd->data_direction);
	if (!task)
		return -ENOMEM;

	cdb = dev->transport->get_cdb(task);
	BUG_ON(!cdb);
	memcpy(cdb, cmd->t_task_cdb,
	       scsi_command_size(cmd->t_task_cdb));

	task->task_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
				GFP_KERNEL);
	if (!task->task_sg) {
		cmd->se_dev->transport->free_task(task);
		return -ENOMEM;
	}

	memcpy(task->task_sg, cmd->t_data_sg,
	       sizeof(struct scatterlist) * cmd->t_data_nents);
	task->task_size = cmd->data_length;
	task->task_sg_nents = cmd->t_data_nents;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_add_tail(&task->t_list, &cmd->t_task_list);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
		if (dev->transport->map_control_SG)
			ret = dev->transport->map_control_SG(task);
	} else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
		if (dev->transport->cdb_none)
			ret = dev->transport->cdb_none(task);
	} else {
		pr_err("target: Unknown control cmd type!\n");
		BUG();
	}

	/* Success! Return number of tasks allocated */
	if (ret == 0)
		return 1;
	return ret;
}

static u32 transport_allocate_tasks(
	struct se_cmd *cmd,
	unsigned long long lba,
	enum dma_data_direction data_direction,
	struct scatterlist *sgl,
	unsigned int sgl_nents)
{
	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
		if (transport_cmd_get_valid_sectors(cmd) < 0)
			return -EINVAL;

		return transport_allocate_data_tasks(cmd, lba, data_direction,
						     sgl, sgl_nents);
	} else
		return transport_allocate_control_task(cmd);
}

/*	 transport_generic_new_cmd(): Called from transport_processing_thread()
 *
 *	 Allocate storage transport resources from a set of values predefined
 *	 by transport_generic_cmd_sequencer() from the iSCSI Target RX process,
 *	 generating struct se_task(s) and/or their payloads for this CDB.
 *	 Any non zero return here is treated as an "out of resource" op.
 */
int transport_generic_new_cmd(struct se_cmd *cmd)
{
	int ret = 0;

	/*
	 * Determine if the TCM fabric module has already allocated physical
	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
	 * beforehand.
	 */
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
	    cmd->data_length) {
		ret = transport_generic_get_mem(cmd);
		if (ret < 0)
			return ret;
	}
	/*
	 * Call transport_new_cmd_obj() to invoke transport_allocate_tasks() for
	 * control or data CDB types, and perform the map to backend subsystem
	 * code from SGL memory allocated here by transport_generic_get_mem(), or
	 * via pre-existing SGL memory setup explicitly by fabric module code with
	 * transport_generic_map_mem_to_cmd().
	 */
	ret = transport_new_cmd_obj(cmd);
	if (ret < 0)
		return ret;
	/*
	 * For WRITEs, let the fabric know its buffer is ready.
	 * This WRITE struct se_cmd (and all of its associated struct se_task's)
	 * will be added to the struct se_device execution queue after its WRITE
	 * data has arrived. (ie: It gets handled by the transport processing
	 * thread a second time)
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		transport_add_tasks_to_state_queue(cmd);
		return transport_generic_write_pending(cmd);
	}
	/*
	 * Everything else but a WRITE, add the struct se_cmd's struct se_task's
	 * to the execution queue.
	 */
	transport_execute_tasks(cmd);
	return 0;
}
EXPORT_SYMBOL(transport_generic_new_cmd);
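
/*
 * Non-compiled sketch of the two completion paths above, as seen from a
 * fabric module:
 */
#if 0
	ret = transport_generic_new_cmd(se_cmd);
	/*
	 * READ / NON-DATA: tasks are already queued for execution.
	 * WRITE: ->write_pending() ran; once the WRITE data arrives the
	 * fabric calls transport_generic_process_write(se_cmd) below.
	 */
#endif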

/*	transport_generic_process_write():
 *
 *
 */
void transport_generic_process_write(struct se_cmd *cmd)
{
	transport_execute_tasks(cmd);
}
EXPORT_SYMBOL(transport_generic_process_write);

static int transport_write_pending_qf(struct se_cmd *cmd)
{
	return cmd->se_tfo->write_pending(cmd);
}

/*	transport_generic_write_pending():
 *
 *
 */
static int transport_generic_write_pending(struct se_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->t_state = TRANSPORT_WRITE_PENDING;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (cmd->transport_qf_callback) {
		ret = cmd->transport_qf_callback(cmd);
		if (ret == -EAGAIN)
			goto queue_full;
		else if (ret < 0)
			return ret;

		cmd->transport_qf_callback = NULL;
		return 0;
	}

	/*
	 * Clear the se_cmd for WRITE_PENDING status in order to set
	 * cmd->t_transport_active=0 so that transport_generic_handle_data
	 * can be called from HW target mode interrupt code.  This is safe
	 * to be called with transport_off=1 before the cmd->se_tfo->write_pending
	 * because the se_cmd->se_lun pointer is not being cleared.
	 */
	transport_cmd_check_stop(cmd, 1, 0);

	/*
	 * Call the fabric write_pending function here to let the
	 * frontend know that WRITE buffers are ready.
	 */
	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN)
		goto queue_full;
	else if (ret < 0)
		return ret;

	return PYX_TRANSPORT_WRITE_PENDING;

queue_full:
	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
	transport_handle_queue_full(cmd, cmd->se_dev,
			transport_write_pending_qf);
	return ret;
}

void transport_release_cmd(struct se_cmd *cmd)
{
	BUG_ON(!cmd->se_tfo);

	transport_free_se_cmd(cmd);
	cmd->se_tfo->release_cmd(cmd);
}
EXPORT_SYMBOL(transport_release_cmd);

/*	transport_generic_free_cmd():
 *
 *	Called from processing frontend to release storage engine resources
 */
void transport_generic_free_cmd(
	struct se_cmd *cmd,
	int wait_for_tasks,
	int session_reinstatement)
{
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD))
		transport_release_cmd(cmd);
	else {
		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);

		if (cmd->se_lun) {
#if 0
			pr_debug("cmd: %p ITT: 0x%08x contains"
				" cmd->se_lun\n", cmd,
				cmd->se_tfo->get_task_tag(cmd));
#endif
			transport_lun_remove_cmd(cmd);
		}

		if (wait_for_tasks && cmd->transport_wait_for_tasks)
			cmd->transport_wait_for_tasks(cmd, 0, 0);

		transport_free_dev_tasks(cmd);

		transport_generic_remove(cmd, session_reinstatement);
	}
}
EXPORT_SYMBOL(transport_generic_free_cmd);
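
/*
 * Typical fabric release sketch (not compiled; whether to pass
 * wait_for_tasks = 1 depends on the caller's context): wait for
 * outstanding tasks and tear down core + fabric state for this
 * descriptor.
 */
#if 0
	transport_generic_free_cmd(se_cmd, 1, 0);
#endif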

static void transport_nop_wait_for_tasks(
	struct se_cmd *cmd,
	int remove_cmd,
	int session_reinstatement)
{
	return;
}

/*	transport_lun_wait_for_tasks():
 *
 *	Called from ConfigFS context to stop the passed struct se_cmd to allow
 *	an struct se_lun to be successfully shutdown.
 */
static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
{
	unsigned long flags;
	int ret;
	/*
	 * If the frontend has already requested this struct se_cmd to
	 * be stopped, we can safely ignore this struct se_cmd.
	 */
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (atomic_read(&cmd->t_transport_stop)) {
		atomic_set(&cmd->transport_lun_stop, 0);
		pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop =="
			" TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		transport_cmd_check_stop(cmd, 1, 0);
		return -EPERM;
	}
	atomic_set(&cmd->transport_lun_fe_stop, 1);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);

	ret = transport_stop_tasks_for_cmd(cmd);

	pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
			" %d\n", cmd, cmd->t_task_list_num, ret);
	if (!ret) {
		pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
				cmd->se_tfo->get_task_tag(cmd));
		wait_for_completion(&cmd->transport_lun_stop_comp);
		pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
				cmd->se_tfo->get_task_tag(cmd));
	}
	transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);

	return 0;
}

static void __transport_clear_lun_from_sessions(struct se_lun *lun)
{
	struct se_cmd *cmd = NULL;
	unsigned long lun_flags, cmd_flags;
	/*
	 * Do exception processing and return CHECK_CONDITION status to the
	 * Initiator Port.
	 */
	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	while (!list_empty(&lun->lun_cmd_list)) {
		cmd = list_first_entry(&lun->lun_cmd_list,
		       struct se_cmd, se_lun_node);
		list_del(&cmd->se_lun_node);

		atomic_set(&cmd->transport_lun_active, 0);
		/*
		 * This will notify iscsi_target_transport.c:
		 * transport_cmd_check_stop() that a LUN shutdown is in
		 * progress for the iscsi_cmd_t.
		 */
		spin_lock(&cmd->t_state_lock);
		pr_debug("SE_LUN[%d] - Setting cmd->transport"
			"_lun_stop for  ITT: 0x%08x\n",
			cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));
		atomic_set(&cmd->transport_lun_stop, 1);
		spin_unlock(&cmd->t_state_lock);

		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);

		if (!cmd->se_lun) {
			pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
				cmd->se_tfo->get_task_tag(cmd),
				cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
			BUG();
		}
		/*
		 * If the Storage engine still owns the iscsi_cmd_t, determine
		 * and/or stop its context.
		 */
		pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
			"_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));

		if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}

		pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
			"_wait_for_tasks(): SUCCESS\n",
			cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));

		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
		if (!atomic_read(&cmd->transport_dev_active)) {
			spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
			goto check_cond;
		}
		atomic_set(&cmd->transport_dev_active, 0);
		transport_all_task_dev_remove_state(cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);

		transport_free_dev_tasks(cmd);
		/*
		 * The Storage engine stopped this struct se_cmd before it was
		 * sent to the fabric frontend for delivery back to the
		 * Initiator Node.  Return this SCSI CDB back with a
		 * CHECK_CONDITION status.
		 */
check_cond:
		transport_send_check_condition_and_sense(cmd,
				TCM_NON_EXISTENT_LUN, 0);
		/*
		 * If the fabric frontend is waiting for this iscsi_cmd_t to
		 * be released, notify the waiting thread now that LU has
		 * finished accessing it.
		 */
		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
		if (atomic_read(&cmd->transport_lun_fe_stop)) {
			pr_debug("SE_LUN[%d] - Detected FE stop for"
				" struct se_cmd: %p ITT: 0x%08x\n",
				lun->unpacked_lun,
				cmd, cmd->se_tfo->get_task_tag(cmd));

			spin_unlock_irqrestore(&cmd->t_state_lock,
					cmd_flags);
			transport_cmd_check_stop(cmd, 1, 0);
			complete(&cmd->transport_lun_fe_stop_comp);
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}
		pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
			lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));

		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	}
	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
}

static int transport_clear_lun_thread(void *p)
{
	struct se_lun *lun = (struct se_lun *)p;

	__transport_clear_lun_from_sessions(lun);
	complete(&lun->lun_shutdown_comp);

	return 0;
}

int transport_clear_lun_from_sessions(struct se_lun *lun)
{
	struct task_struct *kt;

	kt = kthread_run(transport_clear_lun_thread, lun,
			"tcm_cl_%u", lun->unpacked_lun);
	if (IS_ERR(kt)) {
		pr_err("Unable to start clear_lun thread\n");
		return PTR_ERR(kt);
	}
	wait_for_completion(&lun->lun_shutdown_comp);

	return 0;
}

/*	transport_generic_wait_for_tasks():
 *
 *	Called from frontend or passthrough context to wait for storage engine
 *	to pause and/or release frontend generated struct se_cmd.
 */
static void transport_generic_wait_for_tasks(
	struct se_cmd *cmd,
	int remove_cmd,
	int session_reinstatement)
{
	unsigned long flags;

	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req))
		return;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	/*
	 * If we are already stopped due to an external event (ie: LUN shutdown)
	 * sleep until the connection can have the passed struct se_cmd back.
	 * The cmd->transport_lun_stopped_sem will be upped by
	 * transport_clear_lun_from_sessions() once the ConfigFS context caller
	 * has completed its operation on the struct se_cmd.
	 */
	if (atomic_read(&cmd->transport_lun_stop)) {

		pr_debug("wait_for_tasks: Stopping"
			" wait_for_completion(&cmd->t_tasktransport_lun_fe"
			"_stop_comp); for ITT: 0x%08x\n",
			cmd->se_tfo->get_task_tag(cmd));
		/*
		 * There is a special case for WRITES where a FE exception +
		 * LUN shutdown means ConfigFS context is still sleeping on
		 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
		 * We go ahead and up transport_lun_stop_comp just to be sure
		 * here.
		 */
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->transport_lun_stop_comp);
		wait_for_completion(&cmd->transport_lun_fe_stop_comp);
		spin_lock_irqsave(&cmd->t_state_lock, flags);

		transport_all_task_dev_remove_state(cmd);
		/*
		 * At this point, the frontend who was the originator of this
		 * struct se_cmd, now owns the structure and can be released through
		 * normal means below.
		 */
		pr_debug("wait_for_tasks: Stopped"
			" wait_for_completion(&cmd->t_tasktransport_lun_fe_"
			"stop_comp); for ITT: 0x%08x\n",
			cmd->se_tfo->get_task_tag(cmd));

		atomic_set(&cmd->transport_lun_stop, 0);
	}
	if (!atomic_read(&cmd->t_transport_active) ||
	     atomic_read(&cmd->t_transport_aborted))
		goto remove;

	atomic_set(&cmd->t_transport_stop, 1);

	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
		" i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
		" = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd),
		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
		cmd->deferred_t_state);

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);

	wait_for_completion(&cmd->t_transport_stop_comp);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	atomic_set(&cmd->t_transport_active, 0);
	atomic_set(&cmd->t_transport_stop, 0);

	pr_debug("wait_for_tasks: Stopped wait_for_completion("
		"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
		cmd->se_tfo->get_task_tag(cmd));
remove:
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	if (!remove_cmd)
		return;

	transport_generic_free_cmd(cmd, 0, session_reinstatement);
}

static int transport_get_sense_codes(
	struct se_cmd *cmd,
	u8 *asc,
	u8 *ascq)
{
	*asc = cmd->scsi_asc;
	*ascq = cmd->scsi_ascq;

	return 0;
}

static int transport_set_sense_codes(
	struct se_cmd *cmd,
	u8 asc,
	u8 ascq)
{
	cmd->scsi_asc = asc;
	cmd->scsi_ascq = ascq;

	return 0;
}

int transport_send_check_condition_and_sense(
	struct se_cmd *cmd,
	u8 reason,
	int from_transport)
{
	unsigned char *buffer = cmd->sense_buffer;
	unsigned long flags;
	int offset;
	u8 asc = 0, ascq = 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}
	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (!reason && from_transport)
		goto after_reason;

	if (!from_transport)
		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
	/*
	 * Data Segment and SenseLength of the fabric response PDU.
	 *
	 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
	 * from include/scsi/scsi_cmnd.h
	 */
	offset = cmd->se_tfo->set_fabric_sense_len(cmd,
				TRANSPORT_SENSE_BUFFER);
	/*
	 * Actual SENSE DATA, see SPC-3 7.23.2  SPC_SENSE_KEY_OFFSET uses
	 * SENSE KEY values from include/scsi/scsi.h
	 */
	switch (reason) {
	case TCM_NON_EXISTENT_LUN:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT NOT SUPPORTED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
		break;
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_SECTOR_COUNT_TOO_MANY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID COMMAND OPERATION CODE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
		break;
	case TCM_UNKNOWN_MODE_PAGE:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_CHECK_CONDITION_ABORT_CMD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* BUS DEVICE RESET FUNCTION OCCURRED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
		break;
	case TCM_INCORRECT_AMOUNT_OF_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* NOT ENOUGH UNSOLICITED DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
		break;
	case TCM_INVALID_CDB_FIELD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_INVALID_PARAMETER_LIST:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* INVALID FIELD IN PARAMETER LIST */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
		break;
	case TCM_UNEXPECTED_UNSOLICITED_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* UNEXPECTED_UNSOLICITED_DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
		break;
	case TCM_SERVICE_CRC_ERROR:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* PROTOCOL SERVICE CRC ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
		/* N/A */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
		break;
	case TCM_SNACK_REJECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* READ ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
		/* FAILED RETRANSMISSION REQUEST */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
		break;
	case TCM_WRITE_PROTECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* DATA PROTECT */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
		/* WRITE PROTECTED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
		break;
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* UNIT ATTENTION */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_CHECK_CONDITION_NOT_READY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* Not Ready */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
		transport_get_sense_codes(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	default:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT COMMUNICATION FAILURE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
		break;
	}
	/*
	 * This code uses linux/include/scsi/scsi.h SAM status codes!
	 */
	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	/*
	 * Automatically padded, this value is encoded in the fabric's
	 * data_length response PDU containing the SCSI defined sense data.
	 */
	cmd->scsi_sense_length  = TRANSPORT_SENSE_BUFFER + offset;

after_reason:
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
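
/*
 * Byte-layout sketch (not compiled; a zero fabric sense offset is
 * assumed) for the TCM_NON_EXISTENT_LUN case built above:
 */
#if 0
	buffer[0]  = 0x70;		/* CURRENT ERROR, fixed format */
	buffer[2]  = ILLEGAL_REQUEST;	/* SPC_SENSE_KEY_OFFSET */
	buffer[12] = 0x25;		/* SPC_ASC_KEY_OFFSET: LUN not supported */
	buffer[13] = 0x00;		/* SPC_ASCQ_KEY_OFFSET */
	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;	/* + offset (0) */
#endif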

int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
	int ret = 0;

	if (atomic_read(&cmd->t_transport_aborted) != 0) {
		if (!send_status ||
		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
			return 1;
#if 0
		pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
			" status for CDB: 0x%02x ITT: 0x%08x\n",
			cmd->t_task_cdb[0],
			cmd->se_tfo->get_task_tag(cmd));
#endif
		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
		cmd->se_tfo->queue_status(cmd);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL(transport_check_aborted_status);

void transport_send_task_abort(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * If there are still expected incoming fabric WRITEs, we wait
	 * until they have completed before sending a TASK_ABORTED
	 * response.  This response with TASK_ABORTED status will be
	 * queued back to fabric module by transport_check_aborted_status().
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
			atomic_inc(&cmd->t_transport_aborted);
			smp_mb__after_atomic_inc();
			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
			transport_new_cmd_failure(cmd);
			return;
		}
	}
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
#if 0
	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
		" ITT: 0x%08x\n", cmd->t_task_cdb[0],
		cmd->se_tfo->get_task_tag(cmd));
#endif
	cmd->se_tfo->queue_status(cmd);
}

/*	transport_generic_do_tmr():
 *
 *
 */
int transport_generic_do_tmr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_tmr_req *tmr = cmd->se_tmr_req;
	int ret;

	switch (tmr->function) {
	case TMR_ABORT_TASK:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_ABORT_TASK_SET:
	case TMR_CLEAR_ACA:
	case TMR_CLEAR_TASK_SET:
		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		break;
	case TMR_LUN_RESET:
		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
					 TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_WARM_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_COLD_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	default:
		pr_err("Unknown TMR function: 0x%02x.\n",
				tmr->function);
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	}

	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
	cmd->se_tfo->queue_tm_rsp(cmd);

	transport_cmd_check_stop(cmd, 2, 0);
	return 0;
}

/*
 *	Called with spin_lock_irq(&dev->execute_task_lock); held
 *
 */
static struct se_task *
transport_get_task_from_state_list(struct se_device *dev)
{
	struct se_task *task;

	if (list_empty(&dev->state_task_list))
		return NULL;

	list_for_each_entry(task, &dev->state_task_list, t_state_list)
		break;

	list_del(&task->t_state_list);
	atomic_set(&task->task_state_active, 0);

	return task;
}

static void transport_processing_shutdown(struct se_device *dev)
{
	struct se_cmd *cmd;
	struct se_task *task;
	unsigned long flags;
	/*
	 * Empty the struct se_device's struct se_task state list.
	 */
	spin_lock_irqsave(&dev->execute_task_lock, flags);
	while ((task = transport_get_task_from_state_list(dev))) {
		if (!task->task_se_cmd) {
			pr_err("task->task_se_cmd is NULL!\n");
			continue;
		}
		cmd = task->task_se_cmd;

		spin_unlock_irqrestore(&dev->execute_task_lock, flags);

		spin_lock_irqsave(&cmd->t_state_lock, flags);

		pr_debug("PT: cmd: %p task: %p ITT: 0x%08x,"
			" i_state: %d, t_state/def_t_state:"
			" %d/%d cdb: 0x%02x\n", cmd, task,
			cmd->se_tfo->get_task_tag(cmd),
			cmd->se_tfo->get_cmd_state(cmd),
			cmd->t_state, cmd->deferred_t_state,
			cmd->t_task_cdb[0]);
		pr_debug("PT: ITT[0x%08x] - t_tasks: %d t_task_cdbs_left:"
			" %d t_task_cdbs_sent: %d -- t_transport_active: %d"
			" t_transport_stop: %d t_transport_sent: %d\n",
			cmd->se_tfo->get_task_tag(cmd),
			cmd->t_task_list_num,
			atomic_read(&cmd->t_task_cdbs_left),
			atomic_read(&cmd->t_task_cdbs_sent),
			atomic_read(&cmd->t_transport_active),
			atomic_read(&cmd->t_transport_stop),
			atomic_read(&cmd->t_transport_sent));

		if (atomic_read(&task->task_active)) {
			atomic_set(&task->task_stop, 1);
			spin_unlock_irqrestore(
				&cmd->t_state_lock, flags);

			pr_debug("Waiting for task: %p to shutdown for dev:"
				" %p\n", task, dev);
			wait_for_completion(&task->task_stop_comp);
			pr_debug("Completed task: %p shutdown for dev: %p\n",
				task, dev);

			spin_lock_irqsave(&cmd->t_state_lock, flags);
			atomic_dec(&cmd->t_task_cdbs_left);

			atomic_set(&task->task_active, 0);
			atomic_set(&task->task_stop, 0);
		} else {
			if (atomic_read(&task->task_execute_queue) != 0)
				transport_remove_task_from_execute_queue(task, dev);
		}
		__transport_stop_task_timer(task, &flags);

		if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
			spin_unlock_irqrestore(
					&cmd->t_state_lock, flags);

			pr_debug("Skipping task: %p, dev: %p for"
				" t_task_cdbs_ex_left: %d\n", task, dev,
				atomic_read(&cmd->t_task_cdbs_ex_left));

			spin_lock_irqsave(&dev->execute_task_lock, flags);
			continue;
		}

		if (atomic_read(&cmd->t_transport_active)) {
			pr_debug("got t_transport_active = 1 for task: %p, dev:"
					" %p\n", task, dev);

			if (atomic_read(&cmd->t_fe_count)) {
				spin_unlock_irqrestore(
					&cmd->t_state_lock, flags);
				transport_send_check_condition_and_sense(
					cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
					0);
				transport_remove_cmd_from_queue(cmd,
					&cmd->se_dev->dev_queue_obj);

				transport_lun_remove_cmd(cmd);
				transport_cmd_check_stop(cmd, 1, 0);
			} else {
				spin_unlock_irqrestore(
					&cmd->t_state_lock, flags);

				transport_remove_cmd_from_queue(cmd,
					&cmd->se_dev->dev_queue_obj);

				transport_lun_remove_cmd(cmd);

				if (transport_cmd_check_stop(cmd, 1, 0))
					transport_generic_remove(cmd, 0);
			}

			spin_lock_irqsave(&dev->execute_task_lock, flags);
			continue;
		}
		pr_debug("Got t_transport_active = 0 for task: %p, dev: %p\n",
				task, dev);

		if (atomic_read(&cmd->t_fe_count)) {
			spin_unlock_irqrestore(
				&cmd->t_state_lock, flags);
			transport_send_check_condition_and_sense(cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
			transport_remove_cmd_from_queue(cmd,
				&cmd->se_dev->dev_queue_obj);

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop(cmd, 1, 0);
		} else {
			spin_unlock_irqrestore(
				&cmd->t_state_lock, flags);

			transport_remove_cmd_from_queue(cmd,
				&cmd->se_dev->dev_queue_obj);
			transport_lun_remove_cmd(cmd);

			if (transport_cmd_check_stop(cmd, 1, 0))
				transport_generic_remove(cmd, 0);
		}

		spin_lock_irqsave(&dev->execute_task_lock, flags);
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
	/*
	 * Empty the struct se_device's struct se_cmd list.
	 */
	while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) {

		pr_debug("From Device Queue: cmd: %p t_state: %d\n",
				cmd, cmd->t_state);

		if (atomic_read(&cmd->t_fe_count)) {
			transport_send_check_condition_and_sense(cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop(cmd, 1, 0);
		} else {
			transport_lun_remove_cmd(cmd);
			if (transport_cmd_check_stop(cmd, 1, 0))
				transport_generic_remove(cmd, 0);
		}
	}
}

/*	transport_processing_thread():
 *
 *
 */
static int transport_processing_thread(void *param)
{
	int ret;
	struct se_cmd *cmd;
	struct se_device *dev = (struct se_device *) param;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {
		ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
				atomic_read(&dev->dev_queue_obj.queue_cnt) ||
				kthread_should_stop());
		if (ret < 0)
			goto out;

		spin_lock_irq(&dev->dev_status_lock);
		if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) {
			spin_unlock_irq(&dev->dev_status_lock);
			transport_processing_shutdown(dev);
			continue;
		}
		spin_unlock_irq(&dev->dev_status_lock);

get_cmd:
		__transport_execute_tasks(dev);

		cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
		if (!cmd)
			continue;

		switch (cmd->t_state) {
		case TRANSPORT_NEW_CMD:
			BUG();
			break;
		case TRANSPORT_NEW_CMD_MAP:
			if (!cmd->se_tfo->new_cmd_map) {
				pr_err("cmd->se_tfo->new_cmd_map is"
					" NULL for TRANSPORT_NEW_CMD_MAP\n");
				BUG();
			}
			ret = cmd->se_tfo->new_cmd_map(cmd);
			if (ret < 0) {
				cmd->transport_error_status = ret;
				transport_generic_request_failure(cmd, NULL,
						0, (cmd->data_direction !=
						    DMA_TO_DEVICE));
				break;
			}
			ret = transport_generic_new_cmd(cmd);
			if (ret == -EAGAIN)
				break;
			else if (ret < 0) {
				cmd->transport_error_status = ret;
				transport_generic_request_failure(cmd, NULL,
					0, (cmd->data_direction !=
					 DMA_TO_DEVICE));
			}
			break;
		case TRANSPORT_PROCESS_WRITE:
			transport_generic_process_write(cmd);
			break;
		case TRANSPORT_COMPLETE_OK:
			transport_stop_all_task_timers(cmd);
			transport_generic_complete_ok(cmd);
			break;
		case TRANSPORT_REMOVE:
			transport_generic_remove(cmd, 0);
			break;
		case TRANSPORT_FREE_CMD_INTR:
			transport_generic_free_cmd(cmd, 0, 0);
			break;
		case TRANSPORT_PROCESS_TMR:
			transport_generic_do_tmr(cmd);
			break;
		case TRANSPORT_COMPLETE_FAILURE:
			transport_generic_request_failure(cmd, NULL, 1, 1);
			break;
		case TRANSPORT_COMPLETE_TIMEOUT:
			transport_stop_all_task_timers(cmd);
			transport_generic_request_timeout(cmd);
			break;
		case TRANSPORT_COMPLETE_QF_WP:
			transport_generic_write_pending(cmd);
			break;
		default:
			pr_err("Unknown t_state: %d deferred_t_state:"
				" %d for ITT: 0x%08x i_state: %d on SE LUN:"
				" %u\n", cmd->t_state, cmd->deferred_t_state,
				cmd->se_tfo->get_task_tag(cmd),
				cmd->se_tfo->get_cmd_state(cmd),
				cmd->se_lun->unpacked_lun);
			BUG();
		}

		goto get_cmd;
	}

out:
	transport_release_all_cmds(dev);
	dev->process_thread = NULL;
	return 0;
}