/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>

#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static int sub_api_initialized;

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;

static int transport_generic_write_pending(struct se_cmd *);
static int transport_processing_thread(void *param);
static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev);
static int transport_generic_get_mem(struct se_cmd *cmd);
static void transport_put_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session"
				" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration"
				" failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
				" failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
				"cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
			"t10_alua_tg_pt_gp_mem_cache",
			sizeof(struct t10_alua_tg_pt_gp_member),
			__alignof__(struct t10_alua_tg_pt_gp_member),
			0, NULL);
	if (!t10_alua_tg_pt_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"mem_t failed\n");
		goto out_free_tg_pt_gp_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_tg_pt_gp_mem_cache;

	return 0;

out_free_tg_pt_gp_mem_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}
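
/*
 * Illustrative usage sketch (not part of the original source): callers
 * grab a new index when registering an object, e.g. the device setup
 * path later in this file does:
 *
 *	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
 */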

static void transport_init_queue_obj(struct se_queue_obj *qobj)
{
	atomic_set(&qobj->queue_cnt, 0);
	INIT_LIST_HEAD(&qobj->qobj_list);
	init_waitqueue_head(&qobj->thread_wq);
	spin_lock_init(&qobj->cmd_queue_lock);
}

void transport_subsystem_check_init(void)
{
	int ret;

	if (sub_api_initialized)
		return;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = request_module("target_core_stgt");
	if (ret != 0)
		pr_err("Unable to load target_core_stgt\n");

	sub_api_initialized = 1;
	return;
}

struct se_session *transport_init_session(void)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from"
				" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	INIT_LIST_HEAD(&se_sess->sess_wait_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
	kref_init(&se_sess->sess_kref);

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);
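
/*
 * Illustrative sketch (not part of the original source): a fabric module
 * typically allocates a session and then registers it against its portal
 * group once the I_T nexus is established, e.g.:
 *
 *	se_sess = transport_init_session();
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 *	transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
 */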

/*
 * Called with struct se_portal_group->session_lock held.
 */
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}
		kref_get(&se_nacl->acl_kref);

		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned long flags;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);

void target_release_session(struct kref *kref)
{
	struct se_session *se_sess = container_of(kref,
			struct se_session, sess_kref);
	struct se_portal_group *se_tpg = se_sess->se_tpg;

	se_tpg->se_tpg_tfo->close_session(se_sess);
}

void target_get_session(struct se_session *se_sess)
{
	kref_get(&se_sess->sess_kref);
}
EXPORT_SYMBOL(target_get_session);

void target_put_session(struct se_session *se_sess)
{
	struct se_portal_group *tpg = se_sess->se_tpg;

	if (tpg->se_tpg_tfo->put_session != NULL) {
		tpg->se_tpg_tfo->put_session(se_sess);
		return;
	}
	kref_put(&se_sess->sess_kref, target_release_session);
}
EXPORT_SYMBOL(target_put_session);

static void target_complete_nacl(struct kref *kref)
{
	struct se_node_acl *nacl = container_of(kref,
				struct se_node_acl, acl_kref);

	complete(&nacl->acl_free_comp);
}

void target_put_nacl(struct se_node_acl *nacl)
{
	kref_put(&nacl->acl_kref, target_complete_nacl);
}

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		if (se_nacl->acl_stop == 0)
			list_del(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	struct target_core_fabric_ops *se_tfo;
	struct se_node_acl *se_nacl;
	unsigned long flags;
	bool comp_nacl = true;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}
	se_tfo = se_tpg->se_tpg_tfo;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	/*
	 * Determine if we need to do extra work for this initiator node's
	 * struct se_node_acl if it had been previously dynamically generated.
	 */
	se_nacl = se_sess->se_node_acl;

	spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
	if (se_nacl && se_nacl->dynamic_node_acl) {
		if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
			list_del(&se_nacl->acl_list);
			se_tpg->num_node_acls--;
			spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
			core_tpg_wait_for_nacl_pr_ref(se_nacl);
			core_free_device_list_for_node(se_nacl, se_tpg);
			se_tfo->tpg_release_fabric_acl(se_tpg, se_nacl);

			comp_nacl = false;
			spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
431 432
		}
	}
	spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);

	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		se_tpg->se_tpg_tfo->get_fabric_name());
	/*
	 * If the last kref is dropping now for an explicit NodeACL, wake the
	 * sleeping ->acl_free_comp caller in the configfs se_node_acl->acl_group
	 * removal context.
	 */
	if (se_nacl && comp_nacl)
		target_put_nacl(se_nacl);

	transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);
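
/*
 * Illustrative teardown sketch (not part of the original source): fabric
 * module shutdown paths normally pair the two deregistration calls, e.g.:
 *
 *	transport_deregister_session_configfs(se_sess);
 *	transport_deregister_session(se_sess);
 */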

/*
 * Called with cmd->t_state_lock held.
 */
static void target_remove_from_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	if (!dev)
		return;

	if (cmd->transport_state & CMD_T_BUSY)
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (cmd->state_active) {
		list_del(&cmd->state_list);
		cmd->state_active = false;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*	transport_cmd_check_stop():
 *
 *	'transport_off = 1' determines if CMD_T_ACTIVE should be cleared.
 *	'transport_off = 2' determines if task_dev_state should be removed.
 *
 *	A non-zero u8 t_state sets cmd->t_state.
 *	Returns 1 when command is stopped, else 0.
 */
static int transport_cmd_check_stop(
	struct se_cmd *cmd,
	int transport_off,
	u8 t_state)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	/*
	 * Determine if IOCTL context caller is requesting the stopping of this
	 * command for LUN shutdown purposes.
	 */
	if (cmd->transport_state & CMD_T_LUN_STOP) {
		pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
			__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));

		cmd->transport_state &= ~CMD_T_ACTIVE;
		if (transport_off == 2)
			target_remove_from_state_list(cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->transport_lun_stop_comp);
		return 1;
	}
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
			__func__, __LINE__,
			cmd->se_tfo->get_task_tag(cmd));

		if (transport_off == 2)
			target_remove_from_state_list(cmd);

		/*
		 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
		 * to FE.
		 */
		if (transport_off == 2)
			cmd->se_lun = NULL;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->t_transport_stop_comp);
		return 1;
	}
	if (transport_off) {
		cmd->transport_state &= ~CMD_T_ACTIVE;
		if (transport_off == 2) {
			target_remove_from_state_list(cmd);
			/*
			 * Clear struct se_cmd->se_lun before the transport_off == 2
			 * handoff to fabric module.
			 */
			cmd->se_lun = NULL;
			/*
			 * Some fabric modules like tcm_loop can release
			 * their internally allocated I/O reference and
			 * struct se_cmd now.
			 *
			 * Fabric modules are expected to return '1' here if the
			 * se_cmd being passed is released at this point,
			 * or zero if not being released.
			 */
			if (cmd->se_tfo->check_stop_free != NULL) {
				spin_unlock_irqrestore(
					&cmd->t_state_lock, flags);

				return cmd->se_tfo->check_stop_free(cmd);
			}
		}
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		return 0;
	} else if (t_state)
		cmd->t_state = t_state;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return 0;
}

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	return transport_cmd_check_stop(cmd, 2, 0);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;
	unsigned long flags;

	if (!lun)
		return;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		target_remove_from_state_list(cmd);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
	if (!list_empty(&cmd->se_lun_node))
		list_del_init(&cmd->se_lun_node);
	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
}

void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
		transport_lun_remove_cmd(cmd);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
	if (remove) {
		transport_remove_cmd_from_queue(cmd);
		transport_put_cmd(cmd);
	}
}

static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
		bool at_head)
{
	struct se_device *dev = cmd->se_dev;
	struct se_queue_obj *qobj = &dev->dev_queue_obj;
	unsigned long flags;

	if (t_state) {
		spin_lock_irqsave(&cmd->t_state_lock, flags);
		cmd->t_state = t_state;
		cmd->transport_state |= CMD_T_ACTIVE;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	}

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);

	/* If the cmd is already on the list, remove it before we add it */
	if (!list_empty(&cmd->se_queue_node))
		list_del(&cmd->se_queue_node);
	else
		atomic_inc(&qobj->queue_cnt);

	if (at_head)
		list_add(&cmd->se_queue_node, &qobj->qobj_list);
	else
		list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
	cmd->transport_state |= CMD_T_QUEUED;
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	wake_up_interruptible(&qobj->thread_wq);
}

static struct se_cmd *
transport_get_cmd_from_queue(struct se_queue_obj *qobj)
{
	struct se_cmd *cmd;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (list_empty(&qobj->qobj_list)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return NULL;
	}
	cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);

	cmd->transport_state &= ~CMD_T_QUEUED;
	list_del_init(&cmd->se_queue_node);
	atomic_dec(&qobj->queue_cnt);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	return cmd;
}

static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
{
	struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (!(cmd->transport_state & CMD_T_QUEUED)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return;
	}
	cmd->transport_state &= ~CMD_T_QUEUED;
	atomic_dec(&qobj->queue_cnt);
	list_del_init(&cmd->se_queue_node);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
}

static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd);
}

void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
	struct se_device *dev = cmd->se_dev;
	int success = scsi_status == GOOD;
	unsigned long flags;

	cmd->scsi_status = scsi_status;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state &= ~CMD_T_BUSY;

	if (dev && dev->transport->transport_complete) {
		if (dev->transport->transport_complete(cmd,
				cmd->t_data_sg) != 0) {
			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
			success = 1;
		}
	}

	/*
	 * See if we are waiting to complete for an exception condition.
	 */
	if (cmd->transport_state & CMD_T_REQUEST_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->task_stop_comp);
		return;
	}

	if (!success)
		cmd->transport_state |= CMD_T_FAILED;

	/*
	 * Check for case where an explicit ABORT_TASK has been received
	 * and transport_wait_for_tasks() will be waiting for completion..
	 */
	if (cmd->transport_state & CMD_T_ABORTED &&
	    cmd->transport_state & CMD_T_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->t_transport_stop_comp);
		return;
	} else if (cmd->transport_state & CMD_T_FAILED) {
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		INIT_WORK(&cmd->work, target_complete_failure_work);
	} else {
		INIT_WORK(&cmd->work, target_complete_ok_work);
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);
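
/*
 * Illustrative sketch (not part of the original source): backend subsystem
 * plugins report I/O completion to the core from their completion context
 * by passing a SAM status, e.g.:
 *
 *	target_complete_cmd(cmd, GOOD);
 * or, on failure:
 *	target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
 */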

static void target_add_to_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (!cmd->state_active) {
		list_add_tail(&cmd->state_list, &dev->state_list);
		cmd->state_active = true;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

static void __target_add_to_execute_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	bool head_of_queue = false;

	if (!list_empty(&cmd->execute_list))
		return;

	if (dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED &&
	    cmd->sam_task_attr == MSG_HEAD_TAG)
		head_of_queue = true;

	if (head_of_queue)
		list_add(&cmd->execute_list, &dev->execute_list);
	else
		list_add_tail(&cmd->execute_list, &dev->execute_list);

	atomic_inc(&dev->execute_tasks);

	if (cmd->state_active)
		return;

	if (head_of_queue)
		list_add(&cmd->state_list, &dev->state_list);
	else
		list_add_tail(&cmd->state_list, &dev->state_list);

	cmd->state_active = true;
}

static void target_add_to_execute_list(struct se_cmd *cmd)
{
	unsigned long flags;
	struct se_device *dev = cmd->se_dev;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	__target_add_to_execute_list(cmd);
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

void __target_remove_from_execute_list(struct se_cmd *cmd)
{
	list_del_init(&cmd->execute_list);
	atomic_dec(&cmd->se_dev->execute_tasks);
}

static void target_remove_from_execute_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	if (WARN_ON(list_empty(&cmd->execute_list)))
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	__target_remove_from_execute_list(cmd);
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 */

static void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
					qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec(&dev->dev_qf_count);
		smp_mb__after_atomic_dec();

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			: "UNKNOWN");

		transport_add_cmd_to_queue(cmd, cmd->t_state, true);
	}
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	switch (dev->dev_status) {
	case TRANSPORT_DEVICE_ACTIVATED:
		*bl += sprintf(b + *bl, "ACTIVATED");
		break;
	case TRANSPORT_DEVICE_DEACTIVATED:
		*bl += sprintf(b + *bl, "DEACTIVATED");
		break;
	case TRANSPORT_DEVICE_SHUTDOWN:
		*bl += sprintf(b + *bl, "SHUTDOWN");
		break;
	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
		*bl += sprintf(b + *bl, "OFFLINE");
		break;
	default:
		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
		break;
	}

	*bl += sprintf(b + *bl, "  Execute/Max Queue Depth: %d/%d",
		atomic_read(&dev->execute_tasks), dev->queue_depth);
	*bl += sprintf(b + *bl, "  SectorSize: %u  HwMaxSectors: %u\n",
		dev->se_sub_dev->se_dev_attrib.block_size,
		dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
	*bl += sprintf(b + *bl, "        ");
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access"
				" Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport"
				" Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set..
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	 if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
				hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
				hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);
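
/*
 * Illustrative sketch (not part of the original source): a caller walking
 * INQUIRY EVPD 0x83 descriptors would feed each descriptor through the
 * helpers above in order, e.g.:
 *
 *	transport_set_vpd_proto_id(vpd, page_83);
 *	transport_set_vpd_assoc(vpd, page_83);
 *	transport_set_vpd_ident_type(vpd, page_83);
 *	transport_set_vpd_ident(vpd, page_83);
 */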

static void core_setup_task_attr_emulation(struct se_device *dev)
{
	/*
	 * If this device is from Target_Core_Mod/pSCSI, disable the
	 * SAM Task Attribute emulation.
	 *
	 * This is currently not available in upstream Linux/SCSI Target
	 * mode code, and is assumed to be disabled while using TCM/pSCSI.
	 */
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
		return;
	}

	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
	pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
		" device\n", dev->transport->name,
		dev->transport->get_device_rev(dev));
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
	pr_debug("                 ANSI SCSI revision: %02x\n",
				dev->transport->get_device_rev(dev));
}

struct se_device *transport_add_device_to_core_hba(
	struct se_hba *hba,
	struct se_subsystem_api *transport,
	struct se_subsystem_dev *se_dev,
	u32 device_flags,
	void *transport_dev,
	struct se_dev_limits *dev_limits,
	const char *inquiry_prod,
	const char *inquiry_rev)
{
	int force_pt;
	struct se_device  *dev;

	dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
	if (!dev) {
		pr_err("Unable to allocate memory for se_dev_t\n");
		return NULL;
	}

	transport_init_queue_obj(&dev->dev_queue_obj);
	dev->dev_flags		= device_flags;
	dev->dev_status		|= TRANSPORT_DEVICE_DEACTIVATED;
	dev->dev_ptr		= transport_dev;
	dev->se_hba		= hba;
	dev->se_sub_dev		= se_dev;
	dev->transport		= transport;
	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->execute_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->dev_status_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	atomic_set(&dev->dev_ordered_id, 0);

	se_dev_set_default_attribs(dev, dev_limits);

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();
	spin_lock_init(&dev->stats_lock);

	spin_lock(&hba->device_lock);
	list_add_tail(&dev->dev_list, &hba->hba_dev_list);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);
	/*
	 * Setup the SAM Task Attribute emulation for struct se_device
	 */
	core_setup_task_attr_emulation(dev);
	/*
	 * Force PR and ALUA passthrough emulation with internal object use.
	 */
	force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
	/*
	 * Setup the Reservations infrastructure for struct se_device
	 */
	core_setup_reservations(dev, force_pt);
	/*
	 * Setup the Asymmetric Logical Unit Assignment for struct se_device
	 */
	if (core_setup_alua(dev, force_pt) < 0)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->process_thread = kthread_run(transport_processing_thread, dev,
					  "LIO_%s", dev->transport->name);
	if (IS_ERR(dev->process_thread)) {
		pr_err("Unable to create kthread: LIO_%s\n",
			dev->transport->name);
		goto out;
	}
	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 * This is required so that transport_get_inquiry() copies these
	 * originals once back into DEV_T10_WWN(dev) for the virtual device
	 * setup.
	 */
	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (!inquiry_prod || !inquiry_rev) {
			pr_err("All non TCM/pSCSI plugins require"
				" INQUIRY consts\n");
			goto out;
		}

		strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
		strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
	}
	scsi_dump_inquiry(dev);

	return dev;
out:
	kthread_stop(dev->process_thread);

	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	se_release_vpd_for_dev(dev);

	kfree(dev);

	return NULL;
}
EXPORT_SYMBOL(transport_add_device_to_core_hba);

/*	transport_generic_prepare_cdb():
 *
 *	Since the Initiator sees iSCSI devices as LUNs,  the SCSI CDB will
 *	contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
 *	The point of this is since we are mapping iSCSI LUNs to
 *	SCSI Target IDs having a non-zero LUN in the CDB will throw the
 *	devices and HBAs for a loop.
 */
static inline void transport_generic_prepare_cdb(
	unsigned char *cdb)
{
	switch (cdb[0]) {
	case READ_10: /* SBC - RDProtect */
	case READ_12: /* SBC - RDProtect */
	case READ_16: /* SBC - RDProtect */
	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
	case VERIFY: /* SBC - VRProtect */
	case VERIFY_16: /* SBC - VRProtect */
	case WRITE_VERIFY: /* SBC - VRProtect */
	case WRITE_VERIFY_12: /* SBC - VRProtect */
	case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
		break;
	default:
		cdb[1] &= 0x1f; /* clear logical unit number */
		break;
	}
}

static int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
	struct se_device *dev = cmd->se_dev;

	if (cmd->unknown_data_length) {
		cmd->data_length = size;
	} else if (size != cmd->data_length) {
		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
				cmd->data_length, size, cmd->t_task_cdb[0]);

		cmd->cmd_spdtl = size;

		if (cmd->data_direction == DMA_TO_DEVICE) {
			pr_err("Rejecting underflow/overflow"
					" WRITE data\n");
			goto out_invalid_cdb_field;
		}
		/*
		 * Reject READ_* or WRITE_* with overflow/underflow for
		 * type SCF_SCSI_DATA_CDB.
		 */
		if (dev->se_sub_dev->se_dev_attrib.block_size != 512)  {
			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
				" CDB on non 512-byte sector setup subsystem"
				" plugin: %s\n", dev->transport->name);
			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
			goto out_invalid_cdb_field;
		}

		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
		}
		cmd->data_length = size;
	}

	return 0;

out_invalid_cdb_field:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
	return -EINVAL;
}

/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
	struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer)
{
	INIT_LIST_HEAD(&cmd->se_lun_node);
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_queue_node);
	INIT_LIST_HEAD(&cmd->se_cmd_list);
	INIT_LIST_HEAD(&cmd->execute_list);
	INIT_LIST_HEAD(&cmd->state_list);
	init_completion(&cmd->transport_lun_fe_stop_comp);
	init_completion(&cmd->transport_lun_stop_comp);
	init_completion(&cmd->t_transport_stop_comp);
	init_completion(&cmd->cmd_wait_comp);
	init_completion(&cmd->task_stop_comp);
	spin_lock_init(&cmd->t_state_lock);
	cmd->transport_state = CMD_T_DEV_ACTIVE;

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;

	cmd->state_active = false;
}
EXPORT_SYMBOL(transport_init_se_cmd);

static int transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 0;

	if (cmd->sam_task_attr == MSG_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA"
			" emulation is not supported\n");
		return -EINVAL;
	}
	/*
	 * Used to determine when ORDERED commands should go from
	 * Dormant to Active status.
	 */
	cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
	smp_mb__after_atomic_inc();
	pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
			cmd->se_ordered_id, cmd->sam_task_attr,
			cmd->se_dev->transport->name);
	return 0;
}

/*	target_setup_cmd_from_cdb():
 *
 *	Called from fabric RX Thread.
 */
int target_setup_cmd_from_cdb(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
	u32 pr_reg_type = 0;
	u8 alua_ascq = 0;
	unsigned long flags;
	unsigned int size;
	int ret;

	transport_generic_prepare_cdb(cdb);
	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now..  Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
						GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb"
				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(cmd->__t_task_cdb));
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason =
					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
			return -ENOMEM;
		}
	} else
		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Copy the original CDB into cmd->t_task_cdb.
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));

	/*
	 * Check for an existing UNIT ATTENTION condition
	 */
	if (core_scsi3_ua_check(cmd, cdb) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
		return -EINVAL;
	}

	ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
	if (ret != 0) {
		/*
		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
		 * The ALUA additional sense code qualifier (ASCQ) is determined
		 * by the ALUA primary or secondary access state..
		 */
		if (ret > 0) {
			pr_debug("[%s]: ALUA TG Port not available, "
				"SenseKey: NOT_READY, ASC/ASCQ: "
				"0x04/0x%02x\n",
				cmd->se_tfo->get_fabric_name(), alua_ascq);

			transport_set_sense_codes(cmd, 0x04, alua_ascq);
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
			return -EINVAL;
		}
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}

	/*
	 * Check status for SPC-3 Persistent Reservations
	 */
	if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type)) {
		if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
					cmd, cdb, pr_reg_type) != 0) {
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
			cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
			cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
			return -EBUSY;
		}
		/*
		 * This means the CDB is allowed for the SCSI Initiator port
		 * when said port is *NOT* holding the legacy SPC-2 or
		 * SPC-3 Persistent Reservation.
		 */
	}

	ret = cmd->se_dev->transport->parse_cdb(cmd, &size);
	if (ret < 0)
		return ret;

	ret = target_cmd_size_check(cmd, size);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * Check for SAM Task Attribute Emulation
	 */
	if (transport_check_alloc_task_attr(cmd) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}
	spin_lock(&cmd->se_lun->lun_sep_lock);
	if (cmd->se_lun->lun_sep)
		cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
	spin_unlock(&cmd->se_lun->lun_sep_lock);
	return 0;
}
EXPORT_SYMBOL(target_setup_cmd_from_cdb);

/*
 * Used by fabric module frontends to queue tasks directly.
 * May only be used from process context.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	int ret;

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}
	if (in_interrupt()) {
		dump_stack();
		pr_err("transport_generic_handle_cdb cannot be called"
				" from interrupt context\n");
		return -EINVAL;
	}
	/*
	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE following
	 * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
	 * in existing usage to ensure that outstanding descriptors are handled
	 * correctly during shutdown via transport_wait_for_tasks()
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	cmd->transport_state |= CMD_T_ACTIVE;

	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary..
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret < 0)
		transport_generic_request_failure(cmd);

	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);

/**
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 **/
void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{
	struct se_portal_group *se_tpg;
	int rc;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
	BUG_ON(in_interrupt());
	/*
	 * Initialize se_cmd for target operation.  From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
				data_length, data_dir, task_attr, sense);
	if (flags & TARGET_SCF_UNKNOWN_SIZE)
		se_cmd->unknown_data_length = 1;
	/*
	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
	 * se_sess->sess_cmd_list.  A second kref_get here is necessary
	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
	 */
	target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
	/*
	 * Signal bidirectional data payloads to target-core
	 */
	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;
	/*
	 * Locate se_lun pointer and attach it to struct se_cmd
	 */
	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
		transport_send_check_condition_and_sense(se_cmd,
				se_cmd->scsi_sense_reason, 0);
		target_put_sess_cmd(se_sess, se_cmd);
		return;
	}

	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
	if (rc != 0) {
		transport_generic_request_failure(se_cmd);
		return;
	}

	/*
	 * Check if we need to delay processing because of ALUA
	 * Active/NonOptimized primary access state..
	 */
	core_alua_check_nonop_delay(se_cmd);

	/*
	 * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
	 * for immediate execution of READs, otherwise wait for
	 * transport_generic_handle_data() to be called for WRITEs
	 * when fabric has filled the incoming buffer.
	 */
	transport_handle_cdb_direct(se_cmd);
	return;
}
EXPORT_SYMBOL(target_submit_cmd);
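
/*
 * Illustrative sketch (not part of the original source): a fabric driver
 * dispatches a received SCSI command in a single call; the "tl_cmd" and
 * "sc" names below are hypothetical:
 *
 *	target_submit_cmd(&tl_cmd->tl_se_cmd, tl_nexus->se_sess, sc->cmnd,
 *			&tl_cmd->tl_sense_buf[0], sc->device->lun,
 *			scsi_bufflen(sc), task_attr, sc->sc_data_direction, 0);
 */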

static void target_complete_tmr_failure(struct work_struct *work)
{
	struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);

	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
	se_cmd->se_tfo->queue_tm_rsp(se_cmd);
	transport_generic_free_cmd(se_cmd, 0);
}

/**
 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
 *                     for TMR CDBs
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @fabric_context: fabric context for TMR req
 * @tm_type: Type of TM request
 * @gfp: gfp type for caller
 * @tag: referenced task tag for TMR_ABORT_TASK
 * @flags: submit cmd flags
 *
 * Callable from all contexts.
 **/

int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u32 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t gfp, unsigned int tag, int flags)
{
	struct se_portal_group *se_tpg;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);

	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			      0, DMA_NONE, MSG_SIMPLE_TAG, sense);
	/*
	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
	 * allocation failure.
	 */
	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
	if (ret < 0)
		return -ENOMEM;

	if (tm_type == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = tag;

1768 1769 1770 1771 1772
	/* See target_submit_cmd for commentary */
	target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));

	ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
	if (ret) {
1773 1774 1775 1776 1777 1778
		/*
		 * For callback during failure handling, push this work off
		 * to process context with TMR_LUN_DOES_NOT_EXIST status.
		 */
		INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
		schedule_work(&se_cmd->work);
1779
		return 0;
1780 1781
	}
	transport_generic_handle_tmr(se_cmd);
1782
	return 0;
1783 1784 1785
}
EXPORT_SYMBOL(target_submit_tmr);
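
/*
 * Example usage (illustrative sketch; the embedding "my_tmr" structure is
 * hypothetical): a fabric driver handling a LUN RESET might call:
 *
 *	ret = target_submit_tmr(&my_tmr->se_cmd, se_sess, my_tmr->sense_buf,
 *				unpacked_lun, my_tmr, TMR_LUN_RESET,
 *				GFP_KERNEL, 0, 0);
 *	if (ret < 0)
 *		... handle se_tmr_req allocation failure locally ...
 *
 * Note that the @tag argument is only consumed for TMR_ABORT_TASK.
 */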

/*
 * Used by fabric module frontends defining a TFO->new_cmd_map() caller
 * to queue up a newly set up se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
 * complete setup in TCM process context w/ TFO->new_cmd_map().
 */
int transport_generic_handle_cdb_map(
	struct se_cmd *cmd)
{
	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}

	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_cdb_map);

/*	transport_generic_handle_data():
 *
 *
 */
int transport_generic_handle_data(
	struct se_cmd *cmd)
{
	/*
	 * For the software fabric case, we assume the nexus is being
	 * failed/shutdown when signals are pending from the kthread context
	 * caller, so we return a failure.  For the HW target mode case running
	 * in interrupt code, the signal_pending() check is skipped.
	 */
	if (!in_interrupt() && signal_pending(current))
		return -EPERM;
	/*
	 * If the received CDB has already been ABORTED by the generic
	 * target engine, we now call transport_check_aborted_status()
	 * to queue any delayed TASK_ABORTED status for the received CDB to the
	 * fabric module as we are expecting no further incoming DATA OUT
	 * sequences at this point.
	 */
	if (transport_check_aborted_status(cmd, 1) != 0)
		return 0;

	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_data);

/*	transport_generic_handle_tmr():
 *
 *
 */
int transport_generic_handle_tmr(
	struct se_cmd *cmd)
{
	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);

/*
 * If the cmd is active, request it to be stopped and sleep until it
 * has completed.  Caller must hold cmd->t_state_lock; the lock is
 * dropped and re-acquired via *flags around the wait.
 */
bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
{
	bool was_active = false;

	if (cmd->transport_state & CMD_T_BUSY) {
		cmd->transport_state |= CMD_T_REQUEST_STOP;
		spin_unlock_irqrestore(&cmd->t_state_lock, *flags);

		pr_debug("cmd %p waiting to complete\n", cmd);
		wait_for_completion(&cmd->task_stop_comp);
		pr_debug("cmd %p stopped successfully\n", cmd);

		spin_lock_irqsave(&cmd->t_state_lock, *flags);
		cmd->transport_state &= ~CMD_T_REQUEST_STOP;
		cmd->transport_state &= ~CMD_T_BUSY;
		was_active = true;
	}

	return was_active;
}
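
/*
 * Calling convention sketch: cmd->t_state_lock must already be held, e.g.:
 *
 *	spin_lock_irqsave(&cmd->t_state_lock, flags);
 *	was_active = target_stop_cmd(cmd, &flags);
 *	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 *
 * A false return means the command was not busy and no wait occurred.
 */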

/*
 * Handle SAM-esque emulation for generic transport request failures.
 */
void transport_generic_request_failure(struct se_cmd *cmd)
{
	int ret = 0;

	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
		cmd->t_task_cdb[0]);
	pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
		cmd->se_tfo->get_cmd_state(cmd),
		cmd->t_state, cmd->scsi_sense_reason);
	pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
		(cmd->transport_state & CMD_T_ACTIVE) != 0,
		(cmd->transport_state & CMD_T_STOP) != 0,
		(cmd->transport_state & CMD_T_SENT) != 0);

	/*
	 * For SAM Task Attribute emulation for failed struct se_cmd
	 */
	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);

	switch (cmd->scsi_sense_reason) {
	case TCM_NON_EXISTENT_LUN:
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_INVALID_CDB_FIELD:
	case TCM_INVALID_PARAMETER_LIST:
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	case TCM_UNKNOWN_MODE_PAGE:
	case TCM_WRITE_PROTECTED:
	case TCM_CHECK_CONDITION_ABORT_CMD:
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
	case TCM_CHECK_CONDITION_NOT_READY:
		break;
	case TCM_RESERVATION_CONFLICT:
		/*
		 * No SENSE Data payload for this case, set SCSI Status
		 * and queue the response to $FABRIC_MOD.
		 *
		 * Uses linux/include/scsi/scsi.h SAM status code defs
		 */
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
		/*
		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
		 * CONFLICT STATUS.
		 *
		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
		 */
		if (cmd->se_sess &&
		    cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
			core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
				cmd->orig_fe_lun, 0x2C,
				ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);

		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		goto check_stop;
	default:
		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
			cmd->t_task_cdb[0], cmd->scsi_sense_reason);
		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	}
	/*
	 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
	 * make the call to transport_send_check_condition_and_sense()
	 * directly.  Otherwise expect the fabric to make the call to
	 * transport_send_check_condition_and_sense() after handling
	 * possible unsolicited write data payloads.
	 */
	ret = transport_send_check_condition_and_sense(cmd,
			cmd->scsi_sense_reason, 0);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;

check_stop:
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}
EXPORT_SYMBOL(transport_generic_request_failure);

/*
 * Called from Fabric Module context from transport_execute_tasks()
 *
 * The return of this function determines if the tasks from struct se_cmd
 * get added to the execution queue in transport_execute_tasks(),
 * or are added to the delayed or ordered lists here.
 */
static inline int transport_execute_task_attr(struct se_cmd *cmd)
{
	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 1;
	/*
	 * Check for the existence of HEAD_OF_QUEUE, and if set return 1
	 * so the tasks for the passed struct se_cmd are added to the
	 * front of the list.
	 */
	if (cmd->sam_task_attr == MSG_HEAD_TAG) {
		pr_debug("Added HEAD_OF_QUEUE for CDB:"
			" 0x%02x, se_ordered_id: %u\n",
			cmd->t_task_cdb[0],
			cmd->se_ordered_id);
		return 1;
	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
		atomic_inc(&cmd->se_dev->dev_ordered_sync);
		smp_mb__after_atomic_inc();

		pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
				" list, se_ordered_id: %u\n",
				cmd->t_task_cdb[0],
				cmd->se_ordered_id);
		/*
		 * Add ORDERED command to tail of execution queue if
		 * no other older commands exist that need to be
		 * completed first.
		 */
		if (!atomic_read(&cmd->se_dev->simple_cmds))
			return 1;
	} else {
		/*
		 * For SIMPLE and UNTAGGED Task Attribute commands
		 */
		atomic_inc(&cmd->se_dev->simple_cmds);
		smp_mb__after_atomic_inc();
	}
	/*
	 * If one or more outstanding ORDERED task attributes exist, park
	 * the dormant task(s) built for the passed struct se_cmd on the
	 * delayed cmd list for this struct se_device instead.
	 */
	if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
		/*
		 * Add cmd w/ tasks to the delayed cmd queue, which will
		 * be drained upon completion of a HEAD_OF_QUEUE task.
		 */
		spin_lock(&cmd->se_dev->delayed_cmd_lock);
		cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
		list_add_tail(&cmd->se_delayed_node,
				&cmd->se_dev->delayed_cmd_list);
		spin_unlock(&cmd->se_dev->delayed_cmd_lock);

		pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
			" delayed CMD list, se_ordered_id: %u\n",
			cmd->t_task_cdb[0], cmd->sam_task_attr,
			cmd->se_ordered_id);
		/*
		 * Return zero to let transport_execute_tasks() know
		 * not to add the delayed tasks to the execution list.
		 */
		return 0;
	}
	/*
	 * Otherwise, no ORDERED task attributes exist.
	 */
	return 1;
}
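
/*
 * Worked example of the emulation above: with two SIMPLE commands in
 * flight (simple_cmds == 2), an incoming ORDERED command bumps
 * dev_ordered_sync and is parked on delayed_cmd_list until both SIMPLE
 * commands complete; any SIMPLE command arriving after it is likewise
 * delayed (dev_ordered_sync != 0) until transport_complete_task_attr()
 * drains the list up to the ORDERED boundary.
 */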

/*
 * Called from fabric module context in transport_generic_new_cmd() and
 * transport_generic_process_write()
 */
static void transport_execute_tasks(struct se_cmd *cmd)
{
	int add_tasks;
	struct se_device *se_dev = cmd->se_dev;
	/*
	 * Call transport_cmd_check_stop() to see if a fabric exception
	 * has occurred that prevents execution.
	 */
	if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
		/*
		 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
		 * attribute for the tasks of the received struct se_cmd CDB
		 */
		add_tasks = transport_execute_task_attr(cmd);
		if (add_tasks) {
			__transport_execute_tasks(se_dev, cmd);
			return;
		}
	}
	__transport_execute_tasks(se_dev, NULL);
}

static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
{
	int error;
	struct se_cmd *cmd = NULL;
	unsigned long flags;

check_depth:
	spin_lock_irq(&dev->execute_task_lock);
	if (new_cmd != NULL)
		__target_add_to_execute_list(new_cmd);

	if (list_empty(&dev->execute_list)) {
		spin_unlock_irq(&dev->execute_task_lock);
		return 0;
	}
	cmd = list_first_entry(&dev->execute_list, struct se_cmd, execute_list);
	__target_remove_from_execute_list(cmd);
	spin_unlock_irq(&dev->execute_task_lock);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state |= CMD_T_BUSY;
	cmd->transport_state |= CMD_T_SENT;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (cmd->execute_cmd) {
		error = cmd->execute_cmd(cmd);
	} else {
		error = dev->transport->execute_cmd(cmd, cmd->t_data_sg,
				cmd->t_data_nents, cmd->data_direction);
	}

	if (error != 0) {
		spin_lock_irqsave(&cmd->t_state_lock, flags);
		cmd->transport_state &= ~CMD_T_BUSY;
		cmd->transport_state &= ~CMD_T_SENT;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		transport_generic_request_failure(cmd);
	}

	new_cmd = NULL;
	goto check_depth;
}

/*
 * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
 */
static int transport_get_sense_data(struct se_cmd *cmd)
{
	unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;
	u32 offset = 0;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}

	if (!(cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
		goto out;

	if (!dev->transport->get_sense_buffer) {
		pr_err("dev->transport->get_sense_buffer is NULL\n");
		goto out;
	}

	sense_buffer = dev->transport->get_sense_buffer(cmd);
	if (!sense_buffer) {
		pr_err("ITT 0x%08x cmd %p: Unable to locate"
			" sense buffer for task with sense\n",
			cmd->se_tfo->get_task_tag(cmd), cmd);
		goto out;
	}

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER);

	memcpy(&buffer[offset], sense_buffer, TRANSPORT_SENSE_BUFFER);

	/* Automatically padded */
	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;

	pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x and sense\n",
		dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
	return 0;

out:
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	return -1;
}

static inline long long transport_dev_end_lba(struct se_device *dev)
{
	return dev->transport->get_blocks(dev) + 1;
}

static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	u32 sectors;

	if (dev->transport->get_device_type(dev) != TYPE_DISK)
		return 0;

	sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);

	if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
		pr_err("LBA: %llu Sectors: %u exceeds"
			" transport_dev_end_lba(): %llu\n",
			cmd->t_task_lba, sectors,
			transport_dev_end_lba(dev));
		return -EINVAL;
	}

	return 0;
}

/*
 * Called from I/O completion to determine which dormant/delayed
 * and ordered cmds need to have their tasks added to the execution queue.
 */
static void transport_complete_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_cmd *cmd_p, *cmd_tmp;
	int new_active_tasks = 0;

	if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
		atomic_dec(&dev->simple_cmds);
		smp_mb__after_atomic_dec();
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
			cmd->se_ordered_id);
	} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for"
			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
			cmd->se_ordered_id);
	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
		atomic_dec(&dev->dev_ordered_sync);
		smp_mb__after_atomic_dec();

		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
			" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
	}
	/*
	 * Process all commands up to the last received
	 * ORDERED task attribute which requires another blocking
	 * boundary
	 */
	spin_lock(&dev->delayed_cmd_lock);
	list_for_each_entry_safe(cmd_p, cmd_tmp,
			&dev->delayed_cmd_list, se_delayed_node) {

		list_del(&cmd_p->se_delayed_node);
		spin_unlock(&dev->delayed_cmd_lock);

		pr_debug("Calling add_tasks() for"
			" cmd_p: 0x%02x Task Attr: 0x%02x"
			" Dormant -> Active, se_ordered_id: %u\n",
			cmd_p->t_task_cdb[0],
			cmd_p->sam_task_attr, cmd_p->se_ordered_id);

		target_add_to_execute_list(cmd_p);
		new_active_tasks++;

		spin_lock(&dev->delayed_cmd_lock);
		if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
			break;
	}
	spin_unlock(&dev->delayed_cmd_lock);
	/*
	 * If new tasks have become active, wake up the transport thread
	 * to do the processing of the Active tasks.
	 */
	if (new_active_tasks != 0)
		wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
}

static void transport_complete_qf(struct se_cmd *cmd)
{
	int ret = 0;

	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);

	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret)
			goto out;
	}

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		ret = cmd->se_tfo->queue_data_in(cmd);
		break;
	case DMA_TO_DEVICE:
		if (cmd->t_bidi_data_sg) {
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret < 0)
				break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		ret = cmd->se_tfo->queue_status(cmd);
		break;
	default:
		break;
	}

out:
	if (ret < 0) {
		transport_handle_queue_full(cmd, cmd->se_dev);
		return;
	}
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
}

static void transport_handle_queue_full(
	struct se_cmd *cmd,
	struct se_device *dev)
{
	spin_lock_irq(&dev->qf_cmd_lock);
	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
	atomic_inc(&dev->dev_qf_count);
	smp_mb__after_atomic_inc();
	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);

	schedule_work(&cmd->se_dev->qf_work_queue);
}

static void target_complete_ok_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	int reason = 0, ret;

	/*
	 * Check if we need to move delayed/dormant tasks from cmds on the
	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
	 * Attribute.
	 */
	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);
	/*
	 * Check to schedule QUEUE_FULL work, or execute an existing
	 * cmd->transport_qf_callback()
	 */
	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
		schedule_work(&cmd->se_dev->qf_work_queue);

	/*
	 * Check if we need to retrieve a sense buffer from
	 * the struct se_cmd in question.
	 */
	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		if (transport_get_sense_data(cmd) < 0)
			reason = TCM_NON_EXISTENT_LUN;

		if (cmd->scsi_status) {
			ret = transport_send_check_condition_and_sense(
					cmd, reason, 1);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop_to_fabric(cmd);
			return;
		}
	}
	/*
	 * Check for a callback, used amongst other things by
	 * XDWRITE_READ_10 emulation.
	 */
	if (cmd->transport_complete_callback)
		cmd->transport_complete_callback(cmd);

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		spin_lock(&cmd->se_lun->lun_sep_lock);
		if (cmd->se_lun->lun_sep) {
			cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
					cmd->data_length;
		}
		spin_unlock(&cmd->se_lun->lun_sep_lock);

		ret = cmd->se_tfo->queue_data_in(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	case DMA_TO_DEVICE:
		spin_lock(&cmd->se_lun->lun_sep_lock);
		if (cmd->se_lun->lun_sep) {
			cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
				cmd->data_length;
		}
		spin_unlock(&cmd->se_lun->lun_sep_lock);
		/*
		 * Check if we need to send READ payload for BIDI-COMMAND
		 */
		if (cmd->t_bidi_data_sg) {
			spin_lock(&cmd->se_lun->lun_sep_lock);
			if (cmd->se_lun->lun_sep) {
				cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
					cmd->data_length;
			}
			spin_unlock(&cmd->se_lun->lun_sep_lock);
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;
			break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	default:
		break;
	}

	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
		" data_direction: %d\n", cmd, cmd->data_direction);
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}

static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, nents, count)
		__free_page(sg_page(sg));

	kfree(sgl);
}

static inline void transport_free_pages(struct se_cmd *cmd)
{
	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
		return;

	transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
	cmd->t_data_sg = NULL;
	cmd->t_data_nents = 0;

	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
	cmd->t_bidi_data_sg = NULL;
	cmd->t_bidi_data_nents = 0;
}

/**
 * transport_release_cmd - free a command
 * @cmd:       command to free
 *
 * This routine unconditionally frees a command, and reference counting
 * or list removal must be done in the caller.
 */
static void transport_release_cmd(struct se_cmd *cmd)
{
	BUG_ON(!cmd->se_tfo);

	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
		core_tmr_release_req(cmd->se_tmr_req);
	if (cmd->t_task_cdb != cmd->__t_task_cdb)
		kfree(cmd->t_task_cdb);
	/*
	 * If this cmd has been setup with target_get_sess_cmd(), drop
	 * the kref and call ->release_cmd() in kref callback.
	 */
	if (cmd->check_release != 0) {
		target_put_sess_cmd(cmd->se_sess, cmd);
		return;
	}
	cmd->se_tfo->release_cmd(cmd);
}

/**
 * transport_put_cmd - release a reference to a command
 * @cmd:       command to release
 *
 * This routine releases our reference to the command and frees it if possible.
 */
static void transport_put_cmd(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (atomic_read(&cmd->t_fe_count)) {
		if (!atomic_dec_and_test(&cmd->t_fe_count))
			goto out_busy;
	}

	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		target_remove_from_state_list(cmd);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	transport_free_pages(cmd);
	transport_release_cmd(cmd);
	return;
out_busy:
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

/*
 * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of
 * allocating in the core.
 * @cmd:  Associated se_cmd descriptor
 * @sgl:  SGL style memory for TCM WRITE / READ
 * @sgl_count: Number of SGL elements
 * @sgl_bidi: SGL style memory for TCM BIDI READ
 * @sgl_bidi_count: Number of BIDI READ SGL elements
 *
 * Return: nonzero if cmd was rejected for -ENOMEM or improper usage
 * of parameters.
 */
int transport_generic_map_mem_to_cmd(
	struct se_cmd *cmd,
	struct scatterlist *sgl,
	u32 sgl_count,
	struct scatterlist *sgl_bidi,
	u32 sgl_bidi_count)
{
	if (!sgl || !sgl_count)
		return 0;

	/*
	 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
	 * scatterlists already have been set to follow what the fabric
	 * passes for the original expected data transfer length.
	 */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		pr_warn("Rejecting SCSI DATA overflow for fabric using"
			" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}

	cmd->t_data_sg = sgl;
	cmd->t_data_nents = sgl_count;

	if (sgl_bidi && sgl_bidi_count) {
		cmd->t_bidi_data_sg = sgl_bidi;
		cmd->t_bidi_data_nents = sgl_bidi_count;
	}
	cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	return 0;
}
EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
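
/*
 * Example (illustrative sketch; "my_cmd", "my_sgl" and "my_sgl_count" are
 * hypothetical): a fabric that already owns page memory, such as a
 * loopback into the SCSI midlayer, would map it in before submission:
 *
 *	ret = transport_generic_map_mem_to_cmd(&my_cmd->se_cmd, my_sgl,
 *					       my_sgl_count, NULL, 0);
 *	if (ret < 0)
 *		... reject the command ...
 *
 * With SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC set here, transport_generic_new_cmd()
 * and transport_free_pages() will skip core-side SGL allocation and free.
 */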

void *transport_kmap_data_sg(struct se_cmd *cmd)
{
	struct scatterlist *sg = cmd->t_data_sg;
	struct page **pages;
	int i;

	BUG_ON(!sg);
	/*
	 * We need to take into account a possible offset here for fabrics like
	 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
	 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
	 */
	if (!cmd->t_data_nents)
		return NULL;
	else if (cmd->t_data_nents == 1)
		return kmap(sg_page(sg)) + sg->offset;

	/* >1 page. use vmap */
	pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
	if (!pages)
		return NULL;

	/* convert sg[] to pages[] */
	for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
		pages[i] = sg_page(sg);
	}

	cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
	kfree(pages);
	if (!cmd->t_data_vmap)
		return NULL;

	return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
}
EXPORT_SYMBOL(transport_kmap_data_sg);
void transport_kunmap_data_sg(struct se_cmd *cmd)
{
	if (!cmd->t_data_nents) {
		return;
	} else if (cmd->t_data_nents == 1) {
		kunmap(sg_page(cmd->t_data_sg));
		return;
	}

	vunmap(cmd->t_data_vmap);
	cmd->t_data_vmap = NULL;
}
EXPORT_SYMBOL(transport_kunmap_data_sg);
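
/*
 * Example pairing (sketch): a backend or emulation path that needs a
 * linear view of the data SGL must bracket its access:
 *
 *	unsigned char *buf = transport_kmap_data_sg(cmd);
 *	if (!buf)
 *		return -ENOMEM;
 *	... read or fill buf, up to cmd->data_length bytes ...
 *	transport_kunmap_data_sg(cmd);
 */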

static int
transport_generic_get_mem(struct se_cmd *cmd)
{
	u32 length = cmd->data_length;
	unsigned int nents;
	struct page *page;
	gfp_t zero_flag;
	int i = 0;

	nents = DIV_ROUND_UP(length, PAGE_SIZE);
	cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
	if (!cmd->t_data_sg)
		return -ENOMEM;

	cmd->t_data_nents = nents;
	sg_init_table(cmd->t_data_sg, nents);

	zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_CDB ? 0 : __GFP_ZERO;

	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);
		page = alloc_page(GFP_KERNEL | zero_flag);
		if (!page)
			goto out;

		sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}
	return 0;

out:
	/*
	 * Only free pages that were actually allocated; entry i itself
	 * failed allocation and holds no page.
	 */
	while (i > 0) {
		i--;
		__free_page(sg_page(&cmd->t_data_sg[i]));
	}
	kfree(cmd->t_data_sg);
	cmd->t_data_sg = NULL;
	return -ENOMEM;
}

/*
 * Allocate any required resources to execute the command.  For writes we
 * might not have the payload yet, so notify the fabric via a call to
 * ->write_pending instead. Otherwise place it on the execution queue.
 */
int transport_generic_new_cmd(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	int ret = 0;

	/*
	 * Determine if the TCM fabric module has already allocated physical
	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
	 * beforehand.
	 */
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
	    cmd->data_length) {
		ret = transport_generic_get_mem(cmd);
		if (ret < 0)
			goto out_fail;
	}

	/* Workaround for handling zero-length control CDBs */
	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->data_length) {
		spin_lock_irq(&cmd->t_state_lock);
		cmd->t_state = TRANSPORT_COMPLETE;
		cmd->transport_state |= CMD_T_ACTIVE;
		spin_unlock_irq(&cmd->t_state_lock);

		if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
			u8 ua_asc = 0, ua_ascq = 0;

			core_scsi3_ua_clear_for_request_sense(cmd,
					&ua_asc, &ua_ascq);
		}

		INIT_WORK(&cmd->work, target_complete_ok_work);
		queue_work(target_completion_wq, &cmd->work);
		return 0;
	}

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		struct se_dev_attrib *attr = &dev->se_sub_dev->se_dev_attrib;

		if (transport_cmd_get_valid_sectors(cmd) < 0)
			return -EINVAL;

		BUG_ON(cmd->data_length % attr->block_size);
		BUG_ON(DIV_ROUND_UP(cmd->data_length, attr->block_size) >
			attr->hw_max_sectors);
	}

	atomic_inc(&cmd->t_fe_count);

	/*
	 * For WRITEs, let the fabric know its buffer is ready.
	 *
	 * The command will be added to the execution queue after its write
	 * data has arrived.
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		target_add_to_state_list(cmd);
		return transport_generic_write_pending(cmd);
	}
	/*
	 * Everything else but a WRITE, add the command to the execution queue.
	 */
	transport_execute_tasks(cmd);
	return 0;

out_fail:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	return -EINVAL;
}
EXPORT_SYMBOL(transport_generic_new_cmd);
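
/*
 * Flow summary (sketch): for a READ, transport_execute_tasks() is reached
 * directly from here; for a WRITE the sequence is:
 *
 *	transport_generic_new_cmd()
 *	  -> transport_generic_write_pending()	(fabric ->write_pending())
 *	  -> transport_generic_handle_data()	(data arrived from fabric)
 *	  -> transport_generic_process_write()
 *	  -> transport_execute_tasks()
 */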

/*	transport_generic_process_write():
 *
 *
 */
void transport_generic_process_write(struct se_cmd *cmd)
{
	transport_execute_tasks(cmd);
}
EXPORT_SYMBOL(transport_generic_process_write);

static void transport_write_pending_qf(struct se_cmd *cmd)
{
	int ret;

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM) {
		pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
			 cmd);
		transport_handle_queue_full(cmd, cmd->se_dev);
	}
}

static int transport_generic_write_pending(struct se_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->t_state = TRANSPORT_WRITE_PENDING;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * Clear the se_cmd for WRITE_PENDING status in order to set
	 * CMD_T_ACTIVE so that transport_generic_handle_data can be called
	 * from HW target mode interrupt code.  This is safe to be called
	 * with transport_off=1 before the cmd->se_tfo->write_pending
	 * because the se_cmd->se_lun pointer is not being cleared.
	 */
	transport_cmd_check_stop(cmd, 1, 0);

	/*
	 * Call the fabric write_pending function here to let the
	 * frontend know that WRITE buffers are ready.
	 */
	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;
	else if (ret < 0)
		return ret;

	return 1;

queue_full:
	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
	transport_handle_queue_full(cmd, cmd->se_dev);
	return 0;
}

void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
			transport_wait_for_tasks(cmd);

		transport_release_cmd(cmd);
	} else {
		if (wait_for_tasks)
			transport_wait_for_tasks(cmd);

		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);

		if (cmd->se_lun)
			transport_lun_remove_cmd(cmd);

		transport_put_cmd(cmd);
	}
}
EXPORT_SYMBOL(transport_generic_free_cmd);
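
/*
 * Example (sketch): a fabric releasing a command after its final response,
 * waiting for any in-flight processing first:
 *
 *	transport_generic_free_cmd(&my_cmd->se_cmd, 1);
 *
 * Passing wait_for_tasks == 0 skips the transport_wait_for_tasks() step
 * for contexts that know the command is already quiesced.
 */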

/* target_get_sess_cmd - Add command to active ->sess_cmd_list
 * @se_sess:	session to reference
 * @se_cmd:	command descriptor to add
 * @ack_kref:	Signal that fabric will perform an ack target_put_sess_cmd()
 */
void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
			bool ack_kref)
{
	unsigned long flags;

	kref_init(&se_cmd->cmd_kref);
	/*
	 * Add a second kref if the fabric caller is expecting to handle
	 * fabric acknowledgement that requires two target_put_sess_cmd()
	 * invocations before se_cmd descriptor release.
	 */
	if (ack_kref) {
		kref_get(&se_cmd->cmd_kref);
		se_cmd->se_cmd_flags |= SCF_ACK_KREF;
	}

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
	se_cmd->check_release = 1;
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_get_sess_cmd);
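
/*
 * Reference pairing (sketch): a caller passing ack_kref == true must
 * balance the extra reference with a second put, e.g.:
 *
 *	target_get_sess_cmd(se_sess, se_cmd, true);
 *	...
 *	target_put_sess_cmd(se_sess, se_cmd);	// response completed
 *	target_put_sess_cmd(se_sess, se_cmd);	// fabric ACK received
 *
 * The final put fires target_release_cmd_kref() below.
 */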

static void target_release_cmd_kref(struct kref *kref)
{
	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
	struct se_session *se_sess = se_cmd->se_sess;
	unsigned long flags;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	if (list_empty(&se_cmd->se_cmd_list)) {
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		se_cmd->se_tfo->release_cmd(se_cmd);
		return;
	}
	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		complete(&se_cmd->cmd_wait_comp);
		return;
	}
	list_del(&se_cmd->se_cmd_list);
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

	se_cmd->se_tfo->release_cmd(se_cmd);
}

/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
 * @se_sess:	session to reference
 * @se_cmd:	command descriptor to drop
 */
int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
{
	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
}
EXPORT_SYMBOL(target_put_sess_cmd);

/* target_splice_sess_cmd_list - Split active cmds into sess_wait_list
 * @se_sess:	session to split
 */
void target_splice_sess_cmd_list(struct se_session *se_sess)
{
	struct se_cmd *se_cmd;
	unsigned long flags;

	WARN_ON(!list_empty(&se_sess->sess_wait_list));
	INIT_LIST_HEAD(&se_sess->sess_wait_list);

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	se_sess->sess_tearing_down = 1;

	list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);

	list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
		se_cmd->cmd_wait_set = 1;

	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_splice_sess_cmd_list);

/* target_wait_for_sess_cmds - Wait for outstanding descriptors
 * @se_sess:    session to wait for active I/O
 * @wait_for_tasks:	Make extra transport_wait_for_tasks call
 */
void target_wait_for_sess_cmds(
	struct se_session *se_sess,
	int wait_for_tasks)
{
	struct se_cmd *se_cmd, *tmp_cmd;
	bool rc = false;

	list_for_each_entry_safe(se_cmd, tmp_cmd,
				&se_sess->sess_wait_list, se_cmd_list) {
		list_del(&se_cmd->se_cmd_list);

		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
			" %d\n", se_cmd, se_cmd->t_state,
			se_cmd->se_tfo->get_cmd_state(se_cmd));

		if (wait_for_tasks) {
			pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));

			rc = transport_wait_for_tasks(se_cmd);

			pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));
		}

		if (!rc) {
			wait_for_completion(&se_cmd->cmd_wait_comp);
			pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));
		}

		se_cmd->se_tfo->release_cmd(se_cmd);
	}
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);
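
/*
 * Session shutdown sequence (sketch): a fabric tearing down a nexus is
 * expected to split the active list first, then wait, e.g.:
 *
 *	target_splice_sess_cmd_list(se_sess);
 *	... stop the fabric from submitting new commands ...
 *	target_wait_for_sess_cmds(se_sess, 0);
 *
 * followed by the fabric's session deregistration (assumed to be
 * transport_deregister_session(), defined earlier in this file); the
 * ordering above is what sess_tearing_down relies on.
 */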

/*	transport_lun_wait_for_tasks():
 *
 *	Called from ConfigFS context to stop the passed struct se_cmd to allow
 *	a struct se_lun to be successfully shut down.
 */
static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * If the frontend has already requested this struct se_cmd to
	 * be stopped, we can safely ignore this struct se_cmd.
	 */
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_STOP) {
		cmd->transport_state &= ~CMD_T_LUN_STOP;

		pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
			 cmd->se_tfo->get_task_tag(cmd));
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		transport_cmd_check_stop(cmd, 1, 0);
		return -EPERM;
	}
	cmd->transport_state |= CMD_T_LUN_FE_STOP;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);

	// XXX: audit task_flags checks.
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if ((cmd->transport_state & CMD_T_BUSY) &&
	    (cmd->transport_state & CMD_T_SENT)) {
		if (!target_stop_cmd(cmd, &flags))
			ret++;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	} else {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		target_remove_from_execute_list(cmd);
	}

	pr_debug("ConfigFS: cmd: %p stop tasks ret:"
			" %d\n", cmd, ret);
	if (!ret) {
		pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
				cmd->se_tfo->get_task_tag(cmd));
		wait_for_completion(&cmd->transport_lun_stop_comp);
		pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
				cmd->se_tfo->get_task_tag(cmd));
	}
	transport_remove_cmd_from_queue(cmd);

	return 0;
}

static void __transport_clear_lun_from_sessions(struct se_lun *lun)
{
	struct se_cmd *cmd = NULL;
	unsigned long lun_flags, cmd_flags;
	/*
	 * Do exception processing and return CHECK_CONDITION status to the
	 * Initiator Port.
	 */
	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	while (!list_empty(&lun->lun_cmd_list)) {
		cmd = list_first_entry(&lun->lun_cmd_list,
		       struct se_cmd, se_lun_node);
		list_del_init(&cmd->se_lun_node);

		/*
		 * This will notify iscsi_target_transport.c:
		 * transport_cmd_check_stop() that a LUN shutdown is in
		 * progress for the iscsi_cmd_t.
		 */
		spin_lock(&cmd->t_state_lock);
		pr_debug("SE_LUN[%d] - Setting cmd->transport"
			"_lun_stop for  ITT: 0x%08x\n",
			cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));
		cmd->transport_state |= CMD_T_LUN_STOP;
		spin_unlock(&cmd->t_state_lock);

		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);

		if (!cmd->se_lun) {
			pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
				cmd->se_tfo->get_task_tag(cmd),
				cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
			BUG();
		}
		/*
		 * If the Storage engine still owns the iscsi_cmd_t, determine
		 * and/or stop its context.
		 */
		pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
			"_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));

		if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}

		pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
			"_wait_for_tasks(): SUCCESS\n",
			cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));

		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
		if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
			spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
			goto check_cond;
		}
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		target_remove_from_state_list(cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);

		/*
		 * The Storage engine stopped this struct se_cmd before it was
		 * sent to the fabric frontend for delivery back to the
		 * Initiator Node.  Return this SCSI CDB back with a
		 * CHECK_CONDITION status.
		 */
check_cond:
		transport_send_check_condition_and_sense(cmd,
				TCM_NON_EXISTENT_LUN, 0);
		/*
		 * If the fabric frontend is waiting for this iscsi_cmd_t to
		 * be released, notify the waiting thread now that LU has
		 * finished accessing it.
		 */
		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
		if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
			pr_debug("SE_LUN[%d] - Detected FE stop for"
				" struct se_cmd: %p ITT: 0x%08x\n",
				lun->unpacked_lun,
				cmd, cmd->se_tfo->get_task_tag(cmd));

			spin_unlock_irqrestore(&cmd->t_state_lock,
					cmd_flags);
			transport_cmd_check_stop(cmd, 1, 0);
			complete(&cmd->transport_lun_fe_stop_comp);
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}
		pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
			lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));

		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	}
	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
}

static int transport_clear_lun_thread(void *p)
{
	struct se_lun *lun = p;

	__transport_clear_lun_from_sessions(lun);
	complete(&lun->lun_shutdown_comp);

	return 0;
}

int transport_clear_lun_from_sessions(struct se_lun *lun)
{
	struct task_struct *kt;

	kt = kthread_run(transport_clear_lun_thread, lun,
			"tcm_cl_%u", lun->unpacked_lun);
	if (IS_ERR(kt)) {
		pr_err("Unable to start clear_lun thread\n");
		return PTR_ERR(kt);
	}
	wait_for_completion(&lun->lun_shutdown_comp);

	return 0;
}

/**
 * transport_wait_for_tasks - wait for completion to occur
 * @cmd:	command to wait on
 *
 * Called from frontend fabric context to wait for storage engine
 * to pause and/or release frontend generated struct se_cmd.
 */
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}
	/*
	 * If we are already stopped due to an external event (ie: LUN shutdown)
	 * sleep until the connection can have the passed struct se_cmd back.
	 * The cmd->transport_lun_stopped_sem will be upped by
	 * transport_clear_lun_from_sessions() once the ConfigFS context caller
	 * has completed its operation on the struct se_cmd.
	 */
	if (cmd->transport_state & CMD_T_LUN_STOP) {
		pr_debug("wait_for_tasks: Stopping"
			" wait_for_completion(&cmd->t_tasktransport_lun_fe"
			"_stop_comp); for ITT: 0x%08x\n",
			cmd->se_tfo->get_task_tag(cmd));
		/*
		 * There is a special case for WRITES where a FE exception +
		 * LUN shutdown means ConfigFS context is still sleeping on
		 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
		 * We go ahead and up transport_lun_stop_comp just to be sure
		 * here.
		 */
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->transport_lun_stop_comp);
		wait_for_completion(&cmd->transport_lun_fe_stop_comp);
		spin_lock_irqsave(&cmd->t_state_lock, flags);

		target_remove_from_state_list(cmd);
		/*
		 * At this point, the frontend who was the originator of this
		 * struct se_cmd, now owns the structure and can be released through
		 * normal means below.
		 */
		pr_debug("wait_for_tasks: Stopped"
			" wait_for_completion(&cmd->t_tasktransport_lun_fe_"
			"stop_comp); for ITT: 0x%08x\n",
			cmd->se_tfo->get_task_tag(cmd));

		cmd->transport_state &= ~CMD_T_LUN_STOP;
	}

	if (!(cmd->transport_state & CMD_T_ACTIVE)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	cmd->transport_state |= CMD_T_STOP;

	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
		" i_state: %d, t_state: %d, CMD_T_STOP\n",
		cmd, cmd->se_tfo->get_task_tag(cmd),
		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);

	wait_for_completion(&cmd->t_transport_stop_comp);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);

	pr_debug("wait_for_tasks: Stopped wait_for_completion("
		"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
		cmd->se_tfo->get_task_tag(cmd));

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return true;
}
EXPORT_SYMBOL(transport_wait_for_tasks);

static int transport_get_sense_codes(
	struct se_cmd *cmd,
	u8 *asc,
	u8 *ascq)
{
	*asc = cmd->scsi_asc;
	*ascq = cmd->scsi_ascq;

	return 0;
}

static int transport_set_sense_codes(
	struct se_cmd *cmd,
	u8 asc,
	u8 ascq)
{
	cmd->scsi_asc = asc;
	cmd->scsi_ascq = ascq;

	return 0;
}

int transport_send_check_condition_and_sense(
	struct se_cmd *cmd,
	u8 reason,
	int from_transport)
{
	unsigned char *buffer = cmd->sense_buffer;
	unsigned long flags;
	int offset;
	u8 asc = 0, ascq = 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}
	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (!reason && from_transport)
		goto after_reason;

	if (!from_transport)
		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
	/*
	 * Data Segment and SenseLength of the fabric response PDU.
	 *
	 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
	 * from include/scsi/scsi_cmnd.h
	 */
	offset = cmd->se_tfo->set_fabric_sense_len(cmd,
				TRANSPORT_SENSE_BUFFER);
	/*
	 * Actual SENSE DATA, see SPC-3 7.23.2  SPC_SENSE_KEY_OFFSET uses
	 * SENSE KEY values from include/scsi/scsi.h
	 */
	switch (reason) {
	case TCM_NON_EXISTENT_LUN:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT NOT SUPPORTED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
		break;
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_SECTOR_COUNT_TOO_MANY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID COMMAND OPERATION CODE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
		break;
	case TCM_UNKNOWN_MODE_PAGE:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_CHECK_CONDITION_ABORT_CMD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* BUS DEVICE RESET FUNCTION OCCURRED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
		break;
	case TCM_INCORRECT_AMOUNT_OF_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* NOT ENOUGH UNSOLICITED DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
		break;
	case TCM_INVALID_CDB_FIELD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_INVALID_PARAMETER_LIST:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN PARAMETER LIST */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
		break;
	case TCM_UNEXPECTED_UNSOLICITED_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* UNEXPECTED_UNSOLICITED_DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
		break;
	case TCM_SERVICE_CRC_ERROR:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* PROTOCOL SERVICE CRC ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
		/* N/A */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
		break;
	case TCM_SNACK_REJECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* READ ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
		/* FAILED RETRANSMISSION REQUEST */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
		break;
	case TCM_WRITE_PROTECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* DATA PROTECT */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
		/* WRITE PROTECTED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
		break;
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* UNIT ATTENTION */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_CHECK_CONDITION_NOT_READY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* Not Ready */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
		transport_get_sense_codes(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	default:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT COMMUNICATION FAILURE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
		break;
	}
	/*
	 * This code uses linux/include/scsi/scsi.h SAM status codes!
	 */
	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	/*
	 * Automatically padded, this value is encoded in the fabric's
	 * data_length response PDU containing the SCSI defined sense data.
	 */
	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;

after_reason:
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
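
/*
 * Resulting fixed-format sense data layout (per SPC-3, bytes relative to
 * the fabric sense offset): byte 0 = 0x70 (current error), byte 2 = sense
 * key, byte 7 = additional sense length (10), byte 12 = ASC, byte 13 =
 * ASCQ, matching the SPC_*_OFFSET constants used above.
 */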

int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
	int ret = 0;

	if (cmd->transport_state & CMD_T_ABORTED) {
		if (!send_status ||
		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
			return 1;

		pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
			" status for CDB: 0x%02x ITT: 0x%08x\n",
			cmd->t_task_cdb[0],
			cmd->se_tfo->get_task_tag(cmd));

		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
		cmd->se_tfo->queue_status(cmd);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL(transport_check_aborted_status);

void transport_send_task_abort(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * If there are still expected incoming fabric WRITEs, we wait
	 * until they have completed before sending a TASK_ABORTED
	 * response.  This response with TASK_ABORTED status will be
	 * queued back to fabric module by transport_check_aborted_status().
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
			cmd->transport_state |= CMD_T_ABORTED;
			smp_mb__after_atomic_inc();
		}
	}
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;

	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
		" ITT: 0x%08x\n", cmd->t_task_cdb[0],
		cmd->se_tfo->get_task_tag(cmd));

	cmd->se_tfo->queue_status(cmd);
}

static int transport_generic_do_tmr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_tmr_req *tmr = cmd->se_tmr_req;
	int ret;

	switch (tmr->function) {
	case TMR_ABORT_TASK:
		core_tmr_abort_task(dev, tmr, cmd->se_sess);
		break;
	case TMR_ABORT_TASK_SET:
	case TMR_CLEAR_ACA:
	case TMR_CLEAR_TASK_SET:
		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		break;
	case TMR_LUN_RESET:
		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
					 TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_WARM_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_COLD_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	default:
		pr_err("Unknown TMR function: 0x%02x.\n",
				tmr->function);
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	}

	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
	cmd->se_tfo->queue_tm_rsp(cmd);

	transport_cmd_check_stop_to_fabric(cmd);
	return 0;
}

/*	transport_processing_thread():
 *
 *
 */
static int transport_processing_thread(void *param)
{
	int ret;
	struct se_cmd *cmd;
	struct se_device *dev = param;

	while (!kthread_should_stop()) {
		ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
				atomic_read(&dev->dev_queue_obj.queue_cnt) ||
				kthread_should_stop());
		if (ret < 0)
			goto out;

get_cmd:
		cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
		if (!cmd)
			continue;

		switch (cmd->t_state) {
		case TRANSPORT_NEW_CMD:
			BUG();
			break;
		case TRANSPORT_NEW_CMD_MAP:
			if (!cmd->se_tfo->new_cmd_map) {
				pr_err("cmd->se_tfo->new_cmd_map is"
					" NULL for TRANSPORT_NEW_CMD_MAP\n");
				BUG();
			}
			ret = cmd->se_tfo->new_cmd_map(cmd);
			if (ret < 0) {
				transport_generic_request_failure(cmd);
				break;
			}
			ret = transport_generic_new_cmd(cmd);
			if (ret < 0) {
				transport_generic_request_failure(cmd);
				break;
			}
			break;
		case TRANSPORT_PROCESS_WRITE:
			transport_generic_process_write(cmd);
			break;
		case TRANSPORT_PROCESS_TMR:
			transport_generic_do_tmr(cmd);
			break;
		case TRANSPORT_COMPLETE_QF_WP:
			transport_write_pending_qf(cmd);
			break;
		case TRANSPORT_COMPLETE_QF_OK:
			transport_complete_qf(cmd);
			break;
		default:
			pr_err("Unknown t_state: %d  for ITT: 0x%08x "
				"i_state: %d on SE LUN: %u\n",
				cmd->t_state,
				cmd->se_tfo->get_task_tag(cmd),
				cmd->se_tfo->get_cmd_state(cmd),
				cmd->se_lun->unpacked_lun);
			BUG();
		}

		goto get_cmd;
	}

out:
	WARN_ON(!list_empty(&dev->state_list));
	WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
	dev->process_thread = NULL;
	return 0;
}