/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

#define CREATE_TRACE_POINTS
#include <trace/events/target.h>

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_lba_map_cache;
struct kmem_cache *t10_alua_lba_map_mem_cache;

static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev);
static int transport_put_cmd(struct se_cmd *cmd);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session"
				" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration"
				" failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
				" failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
				"cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_lba_map_cache = kmem_cache_create(
			"t10_alua_lba_map_cache",
			sizeof(struct t10_alua_lba_map),
			__alignof__(struct t10_alua_lba_map), 0, NULL);
	if (!t10_alua_lba_map_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_"
				"cache failed\n");
		goto out_free_tg_pt_gp_cache;
	}
	t10_alua_lba_map_mem_cache = kmem_cache_create(
			"t10_alua_lba_map_mem_cache",
			sizeof(struct t10_alua_lba_map_member),
			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
	if (!t10_alua_lba_map_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
				"cache failed\n");
		goto out_free_lba_map_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_lba_map_mem_cache;

	return 0;

out_free_lba_map_mem_cache:
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
out_free_lba_map_cache:
	kmem_cache_destroy(t10_alua_lba_map_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_lba_map_cache);
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}
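/*
 * Illustrative use (editorial note, not part of the original code): a stats
 * or fabric setup path would typically pull a fresh index like this,
 * assuming SCSI_INST_INDEX is one of the scsi_index_t values defined in
 * target_core_base.h:
 *
 *	u32 inst_index = scsi_get_new_index(SCSI_INST_INDEX);
 */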

void transport_subsystem_check_init(void)
{
	int ret;
	static int sub_api_initialized;

	if (sub_api_initialized)
		return;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = request_module("target_core_user");
	if (ret != 0)
		pr_err("Unable to load target_core_user\n");

	sub_api_initialized = 1;
}

struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from"
				" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	INIT_LIST_HEAD(&se_sess->sess_wait_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
	kref_init(&se_sess->sess_kref);
	se_sess->sup_prot_ops = sup_prot_ops;

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);
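/*
 * Sketch of typical fabric usage (editorial, illustrative only; the fabric
 * specific variables below are hypothetical):
 *
 *	struct se_session *sess;
 *
 *	sess = transport_init_session(TARGET_PROT_NORMAL);
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 *
 * The session is later attached to a TPG and node ACL via
 * __transport_register_session() or transport_register_session().
 */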

int transport_alloc_session_tags(struct se_session *se_sess,
			         unsigned int tag_num, unsigned int tag_size)
{
	int rc;

	se_sess->sess_cmd_map = kzalloc(tag_num * tag_size,
					GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!se_sess->sess_cmd_map) {
		se_sess->sess_cmd_map = vzalloc(tag_num * tag_size);
		if (!se_sess->sess_cmd_map) {
			pr_err("Unable to allocate se_sess->sess_cmd_map\n");
			return -ENOMEM;
		}
	}

	rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
	if (rc < 0) {
		pr_err("Unable to init se_sess->sess_tag_pool,"
			" tag_num: %u\n", tag_num);
		kvfree(se_sess->sess_cmd_map);
		se_sess->sess_cmd_map = NULL;
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(transport_alloc_session_tags);

struct se_session *transport_init_session_tags(unsigned int tag_num,
					       unsigned int tag_size,
					       enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int rc;

	se_sess = transport_init_session(sup_prot_ops);
	if (IS_ERR(se_sess))
		return se_sess;

	rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
	if (rc < 0) {
		transport_free_session(se_sess);
		return ERR_PTR(-ENOMEM);
	}

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session_tags);
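/*
 * Editorial note: the tag pool above backs per-command fabric state.  A
 * fabric embedding its own descriptor in sess_cmd_map would size the pool
 * roughly like this (struct my_fabric_cmd is a hypothetical example):
 *
 *	sess = transport_init_session_tags(queue_depth,
 *					   sizeof(struct my_fabric_cmd),
 *					   TARGET_PROT_NORMAL);
 *
 * Each tag handed out by percpu_ida_alloc() then indexes one
 * sizeof(struct my_fabric_cmd) slot within se_sess->sess_cmd_map.
 */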

/*
 * Called with spin_lock_irqsave(&struct se_portal_group->session_lock) held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active se_session
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * Determine if fabric allows for T10-PI feature bits exposed to
		 * initiators for device backends with !dev->dev_attrib.pi_prot_type.
		 *
		 * If so, then always save prot_type on a per se_node_acl node
		 * basis and re-instate the previous sess_prot_type to avoid
		 * disabling PI from below any previously initiator side
		 * registered LUNs.
		 */
		if (se_nacl->saved_prot_type)
			se_sess->sess_prot_type = se_nacl->saved_prot_type;
		else if (tfo->tpg_check_prot_fabric_only)
			se_sess->sess_prot_type = se_nacl->saved_prot_type =
					tfo->tpg_check_prot_fabric_only(se_tpg);
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}
		kref_get(&se_nacl->acl_kref);

		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned long flags;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);

static void target_release_session(struct kref *kref)
{
	struct se_session *se_sess = container_of(kref,
			struct se_session, sess_kref);
	struct se_portal_group *se_tpg = se_sess->se_tpg;

	se_tpg->se_tpg_tfo->close_session(se_sess);
}

void target_get_session(struct se_session *se_sess)
{
	kref_get(&se_sess->sess_kref);
}
EXPORT_SYMBOL(target_get_session);

void target_put_session(struct se_session *se_sess)
{
	kref_put(&se_sess->sess_kref, target_release_session);
}
EXPORT_SYMBOL(target_put_session);

ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
{
	struct se_session *se_sess;
	ssize_t len = 0;

	spin_lock_bh(&se_tpg->session_lock);
	list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
		if (!se_sess->se_node_acl)
			continue;
		if (!se_sess->se_node_acl->dynamic_node_acl)
			continue;
		if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
			break;

		len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
				se_sess->se_node_acl->initiatorname);
		len += 1; /* Include NULL terminator */
	}
	spin_unlock_bh(&se_tpg->session_lock);

	return len;
}
EXPORT_SYMBOL(target_show_dynamic_sessions);

423 424 425 426 427 428 429 430 431 432 433 434 435
static void target_complete_nacl(struct kref *kref)
{
	struct se_node_acl *nacl = container_of(kref,
				struct se_node_acl, acl_kref);

	complete(&nacl->acl_free_comp);
}

void target_put_nacl(struct se_node_acl *nacl)
{
	kref_put(&nacl->acl_kref, target_complete_nacl);
}

436 437 438
void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
439
	unsigned long flags;
440 441 442 443
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
444
	if (se_nacl) {
445
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
446 447
		if (se_nacl->acl_stop == 0)
			list_del(&se_sess->sess_acl_list);
448 449 450 451 452 453 454 455 456 457 458 459
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
460
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
461 462 463 464 465 466
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
467 468
	if (se_sess->sess_cmd_map) {
		percpu_ida_destroy(&se_sess->sess_tag_pool);
469
		kvfree(se_sess->sess_cmd_map);
470
	}
471 472 473 474 475 476 477
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
478
	const struct target_core_fabric_ops *se_tfo;
479
	struct se_node_acl *se_nacl;
480
	unsigned long flags;
481
	bool comp_nacl = true, drop_nacl = false;
482

483
	if (!se_tpg) {
484 485 486
		transport_free_session(se_sess);
		return;
	}
487
	se_tfo = se_tpg->se_tpg_tfo;
488

489
	spin_lock_irqsave(&se_tpg->session_lock, flags);
490 491 492
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
493
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
494 495 496 497 498 499

	/*
	 * Determine if we need to do extra work for this initiator node's
	 * struct se_node_acl if it had been previously dynamically generated.
	 */
	se_nacl = se_sess->se_node_acl;
500

501
	mutex_lock(&se_tpg->acl_node_mutex);
502 503 504 505
	if (se_nacl && se_nacl->dynamic_node_acl) {
		if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
			list_del(&se_nacl->acl_list);
			se_tpg->num_node_acls--;
506
			drop_nacl = true;
507 508
		}
	}
509
	mutex_unlock(&se_tpg->acl_node_mutex);
510

511 512 513 514 515 516
	if (drop_nacl) {
		core_tpg_wait_for_nacl_pr_ref(se_nacl);
		core_free_device_list_for_node(se_nacl, se_tpg);
		kfree(se_nacl);
		comp_nacl = false;
	}
517
	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
518
		se_tpg->se_tpg_tfo->get_fabric_name());
519
	/*
	 * If the last kref is dropping now for an explicit NodeACL, wake the
	 * sleeping ->acl_free_comp caller in the configfs se_node_acl->acl_group
	 * removal context.
	 */
524
	if (se_nacl && comp_nacl)
525
		target_put_nacl(se_nacl);
526

527
	transport_free_session(se_sess);
528 529 530 531
}
EXPORT_SYMBOL(transport_deregister_session);

/*
532
 * Called with cmd->t_state_lock held.
533
 */
534
static void target_remove_from_state_list(struct se_cmd *cmd)
535
{
536
	struct se_device *dev = cmd->se_dev;
537 538
	unsigned long flags;

539 540
	if (!dev)
		return;
541

542 543
	if (cmd->transport_state & CMD_T_BUSY)
		return;
544

545 546 547 548
	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (cmd->state_active) {
		list_del(&cmd->state_list);
		cmd->state_active = false;
549
	}
550
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
551 552
}

553 554
static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
				    bool write_pending)
555 556 557
{
	unsigned long flags;

558
	spin_lock_irqsave(&cmd->t_state_lock, flags);
559 560 561
	if (write_pending)
		cmd->t_state = TRANSPORT_WRITE_PENDING;

562 563 564 565 566 567 568 569 570
	if (remove_from_lists) {
		target_remove_from_state_list(cmd);

		/*
		 * Clear struct se_cmd->se_lun before the handoff to FE.
		 */
		cmd->se_lun = NULL;
	}

571 572
	/*
	 * Determine if frontend context caller is requesting the stopping of
573
	 * this command for frontend exceptions.
574
	 */
575
	if (cmd->transport_state & CMD_T_STOP) {
576 577
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
			__func__, __LINE__, cmd->tag);
578

579
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
580

581
		complete_all(&cmd->t_transport_stop_comp);
582 583
		return 1;
	}
584 585 586 587 588 589 590 591 592 593 594 595 596 597 598

	cmd->transport_state &= ~CMD_T_ACTIVE;
	if (remove_from_lists) {
		/*
		 * Some fabric modules like tcm_loop can release
		 * their internally allocated I/O reference now and
		 * struct se_cmd now.
		 *
		 * Fabric modules are expected to return '1' here if the
		 * se_cmd being passed is released at this point,
		 * or zero if not being released.
		 */
		if (cmd->se_tfo->check_stop_free != NULL) {
			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
			return cmd->se_tfo->check_stop_free(cmd);
599
		}
600
	}
601

602
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
603 604 605 606 607
	return 0;
}

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
608
	return transport_cmd_check_stop(cmd, true, false);
609 610 611 612
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
613
	struct se_lun *lun = cmd->se_lun;
614

615
	if (!lun)
616 617
		return;

618 619
	if (cmpxchg(&cmd->lun_ref_active, true, false))
		percpu_ref_put(&lun->lun_ref);
620 621 622 623
}

void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
624 625
	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
		transport_lun_remove_cmd(cmd);
626 627 628 629 630 631
	/*
	 * Allow the fabric driver to unmap any resources before
	 * releasing the descriptor via TFO->release_cmd()
	 */
	if (remove)
		cmd->se_tfo->aborted_task(cmd);
632

633 634
	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
635
	if (remove)
636
		transport_put_cmd(cmd);
637 638
}

639 640 641 642
static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

643 644
	transport_generic_request_failure(cmd,
			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
645 646
}

647
/*
648 649
 * Used when asking transport to copy Sense Data from the underlying
 * Linux/SCSI struct scsi_cmnd
650
 */
651
static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
652 653 654 655 656 657
{
	struct se_device *dev = cmd->se_dev;

	WARN_ON(!cmd->se_lun);

	if (!dev)
658
		return NULL;
659

660 661
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
		return NULL;
662

663
	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
664

665
	pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
666
		dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
667
	return cmd->sense_buffer;
668 669
}

670
void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
671
{
672
	struct se_device *dev = cmd->se_dev;
673
	int success = scsi_status == GOOD;
674 675
	unsigned long flags;

676 677 678
	cmd->scsi_status = scsi_status;


679
	spin_lock_irqsave(&cmd->t_state_lock, flags);
680
	cmd->transport_state &= ~CMD_T_BUSY;
681 682

	if (dev && dev->transport->transport_complete) {
683 684 685 686
		dev->transport->transport_complete(cmd,
				cmd->t_data_sg,
				transport_get_sense_buffer(cmd));
		if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
687 688 689 690
			success = 1;
	}

	/*
691
	 * See if we are waiting to complete for an exception condition.
692
	 */
693
	if (cmd->transport_state & CMD_T_REQUEST_STOP) {
694
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
695
		complete(&cmd->task_stop_comp);
696 697
		return;
	}
698

699
	/*
700
	 * Check for case where an explicit ABORT_TASK has been received
701 702 703 704 705
	 * and transport_wait_for_tasks() will be waiting for completion..
	 */
	if (cmd->transport_state & CMD_T_ABORTED &&
	    cmd->transport_state & CMD_T_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
706
		complete_all(&cmd->t_transport_stop_comp);
707
		return;
708
	} else if (!success) {
709
		INIT_WORK(&cmd->work, target_complete_failure_work);
710
	} else {
711
		INIT_WORK(&cmd->work, target_complete_ok_work);
712
	}
713 714

	cmd->t_state = TRANSPORT_COMPLETE;
715
	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
716
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
717

718
	queue_work(target_completion_wq, &cmd->work);
719
}
720 721
EXPORT_SYMBOL(target_complete_cmd);

722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738
void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
	if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			cmd->residual_count += cmd->data_length - length;
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = cmd->data_length - length;
		}

		cmd->data_length = length;
	}

	target_complete_cmd(cmd, scsi_status);
}
EXPORT_SYMBOL(target_complete_cmd_with_length);
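/*
 * Worked example (editorial): if a fabric posts a 256-byte buffer for an
 * emulated INQUIRY whose payload is only 36 bytes, calling
 * target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 36) sets
 * SCF_UNDERFLOW_BIT, makes residual_count 220 and trims data_length to 36
 * before handing the command to target_complete_cmd().
 */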

739
static void target_add_to_state_list(struct se_cmd *cmd)
740
{
741 742
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;
743

744 745 746 747
	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (!cmd->state_active) {
		list_add_tail(&cmd->state_list, &dev->state_list);
		cmd->state_active = true;
748
	}
749
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
750 751
}

752
/*
753
 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
754
 */
755 756
static void transport_write_pending_qf(struct se_cmd *cmd);
static void transport_complete_qf(struct se_cmd *cmd);
757

758
void target_qf_do_work(struct work_struct *work)
759 760 761
{
	struct se_device *dev = container_of(work, struct se_device,
					qf_work_queue);
762
	LIST_HEAD(qf_cmd_list);
763 764 765
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
766 767
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);
768

769
	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
770
		list_del(&cmd->se_qf_node);
771
		atomic_dec_mb(&dev->dev_qf_count);
772

773
		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
774
			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
775
			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
776 777
			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			: "UNKNOWN");
778

779 780 781 782
		if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
			transport_write_pending_qf(cmd);
		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
			transport_complete_qf(cmd);
783 784 785
	}
}

786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809
unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
810
	if (dev->export_count)
811
		*bl += sprintf(b + *bl, "ACTIVATED");
812
	else
813 814
		*bl += sprintf(b + *bl, "DEACTIVATED");

815
	*bl += sprintf(b + *bl, "  Max Queue Depth: %d", dev->queue_depth);
816
	*bl += sprintf(b + *bl, "  SectorSize: %u  HwMaxSectors: %u\n",
817 818
		dev->dev_attrib.block_size,
		dev->dev_attrib.hw_max_sectors);
819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871
	*bl += sprintf(b + *bl, "        ");
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access"
				" Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport"
				" Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
872
		pr_debug("%s", buf);
873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set..
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	 if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
897 898
	int ret = 0;
	int len;
899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
915
		ret = -EINVAL;
916 917 918 919 920 921
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
922
		pr_debug("%s", buf);
923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
945 946
	int ret = 0;
	int len;
947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
973
		ret = -EINVAL;
974 975 976
		break;
	}

977 978 979
	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
980
		strncpy(p_buf, buf, p_buf_len);
981
	} else {
982
		pr_debug("%s", buf);
983
	}
984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
1012 1013
		snprintf(buf, sizeof(buf),
			"T10 VPD Binary Device Identifier: %s\n",
1014 1015 1016
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
1017 1018
		snprintf(buf, sizeof(buf),
			"T10 VPD ASCII Device Identifier: %s\n",
1019 1020 1021
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
1022 1023
		snprintf(buf, sizeof(buf),
			"T10 VPD UTF-8 Device Identifier: %s\n",
1024 1025 1026 1027 1028
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
1029
		ret = -EINVAL;
1030 1031 1032 1033 1034 1035
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
1036
		pr_debug("%s", buf);
1037 1038 1039 1040 1041 1042 1043 1044

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
1045
	int j = 0, i = 4; /* offset to start of the identifier */
1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
				hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
				hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);

1078 1079
sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090
{
	struct se_device *dev = cmd->se_dev;

	if (cmd->unknown_data_length) {
		cmd->data_length = size;
	} else if (size != cmd->data_length) {
		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
				cmd->data_length, size, cmd->t_task_cdb[0]);

1091 1092 1093
		if (cmd->data_direction == DMA_TO_DEVICE &&
		    cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
			pr_err("Rejecting underflow/overflow WRITE data\n");
1094
			return TCM_INVALID_CDB_FIELD;
1095 1096 1097 1098 1099
		}
		/*
		 * Reject READ_* or WRITE_* with overflow/underflow for
		 * type SCF_SCSI_DATA_CDB.
		 */
1100
		if (dev->dev_attrib.block_size != 512)  {
1101 1102 1103 1104
			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
				" CDB on non 512-byte sector setup subsystem"
				" plugin: %s\n", dev->transport->name);
			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
1105
			return TCM_INVALID_CDB_FIELD;
1106
		}
1107 1108 1109 1110 1111 1112
		/*
		 * For the overflow case keep the existing fabric provided
		 * ->data_length.  Otherwise for the underflow case, reset
		 * ->data_length to the smaller SCSI expected data transfer
		 * length.
		 */
1113 1114 1115 1116 1117 1118
		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
1119
			cmd->data_length = size;
1120 1121 1122 1123 1124 1125 1126
		}
	}

	return 0;

}
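/*
 * Worked example (editorial): a READ(10) for 8 sectors on a 512-byte block
 * device implies size = 4096.  If the fabric only advertised
 * cmd->data_length = 2048, the code above keeps data_length at 2048 and
 * records SCF_OVERFLOW_BIT with residual_count = 2048; in the opposite,
 * underflow case data_length is trimmed down to the CDB transfer length.
 */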

1127 1128 1129
/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
1130 1131
 *
 * Preserves the value of @cmd->tag.
1132 1133 1134
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
1135
	const struct target_core_fabric_ops *tfo,
1136 1137 1138 1139 1140 1141
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer)
{
1142
	INIT_LIST_HEAD(&cmd->se_delayed_node);
1143
	INIT_LIST_HEAD(&cmd->se_qf_node);
1144
	INIT_LIST_HEAD(&cmd->se_cmd_list);
1145
	INIT_LIST_HEAD(&cmd->state_list);
1146
	init_completion(&cmd->t_transport_stop_comp);
1147
	init_completion(&cmd->cmd_wait_comp);
1148
	init_completion(&cmd->task_stop_comp);
1149
	spin_lock_init(&cmd->t_state_lock);
1150
	kref_init(&cmd->cmd_kref);
1151
	cmd->transport_state = CMD_T_DEV_ACTIVE;
1152 1153 1154 1155 1156 1157 1158

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;
1159 1160

	cmd->state_active = false;
1161 1162 1163
}
EXPORT_SYMBOL(transport_init_se_cmd);

1164 1165
static sense_reason_t
transport_check_alloc_task_attr(struct se_cmd *cmd)
1166
{
1167 1168
	struct se_device *dev = cmd->se_dev;

1169 1170 1171 1172
	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
1173
	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1174 1175
		return 0;

	if (cmd->sam_task_attr == TCM_ACA_TAG) {
1177
		pr_debug("SAM Task Attribute ACA"
1178
			" emulation is not supported\n");
1179
		return TCM_INVALID_CDB_FIELD;
1180
	}
1181

1182 1183 1184
	return 0;
}

1185 1186
sense_reason_t
target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
1187
{
1188
	struct se_device *dev = cmd->se_dev;
1189
	sense_reason_t ret;
1190 1191 1192 1193 1194 1195

	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
1196
		pr_err("Received SCSI CDB with command_size: %d that"
1197 1198
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
1199
		return TCM_INVALID_CDB_FIELD;
1200 1201 1202 1203 1204 1205
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now..  Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
1206 1207
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
1208
						GFP_KERNEL);
1209 1210
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb"
1211
				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
1212
				scsi_command_size(cdb),
1213
				(unsigned long)sizeof(cmd->__t_task_cdb));
1214
			return TCM_OUT_OF_RESOURCES;
1215 1216
		}
	} else
1217
		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Copy the original CDB into cmd->t_task_cdb.
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
1222

1223 1224
	trace_target_sequencer_start(cmd);

1225 1226 1227
	/*
	 * Check for an existing UNIT ATTENTION condition
	 */
1228 1229 1230
	ret = target_scsi3_ua_check(cmd);
	if (ret)
		return ret;
1231

	ret = target_alua_state_check(cmd);
1233 1234
	if (ret)
		return ret;
1235

1236
	ret = target_check_reservation(cmd);
1237 1238
	if (ret) {
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1239
		return ret;
1240
	}
1241

1242
	ret = dev->transport->parse_cdb(cmd);
1243 1244 1245 1246 1247
	if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
		pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
				    cmd->se_tfo->get_fabric_name(),
				    cmd->se_sess->se_node_acl->initiatorname,
				    cmd->t_task_cdb[0]);
1248 1249 1250 1251 1252
	if (ret)
		return ret;

	ret = transport_check_alloc_task_attr(cmd);
	if (ret)
1253
		return ret;
1254 1255

	cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
1256
	atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
1257 1258
	return 0;
}
1259
EXPORT_SYMBOL(target_setup_cmd_from_cdb);
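/*
 * Editorial summary of the sequencing above: UNIT ATTENTION check ->
 * ALUA state check -> reservation check -> backend ->parse_cdb() ->
 * SAM task attribute check.  The first non-zero sense_reason_t stops the
 * chain, and callers translate it into CHECK_CONDITION status.
 */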
1260

/*
 * Used by fabric module frontends to queue tasks directly.
 * May only be called from process context.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
1268
	sense_reason_t ret;
1269

1270 1271
	if (!cmd->se_lun) {
		dump_stack();
1272
		pr_err("cmd->se_lun is NULL\n");
1273 1274 1275 1276
		return -EINVAL;
	}
	if (in_interrupt()) {
		dump_stack();
1277
		pr_err("transport_generic_handle_cdb cannot be called"
1278 1279 1280
				" from interrupt context\n");
		return -EINVAL;
	}
1281
	/*
1282 1283 1284
	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
	 * outstanding descriptors are handled correctly during shutdown via
	 * transport_wait_for_tasks()
1285 1286 1287 1288 1289
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
1290 1291
	cmd->transport_state |= CMD_T_ACTIVE;

1292 1293 1294 1295 1296 1297
	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary..
	 */
	ret = transport_generic_new_cmd(cmd);
1298 1299
	if (ret)
		transport_generic_request_failure(cmd, ret);
1300
	return 0;
1301 1302 1303
}
EXPORT_SYMBOL(transport_handle_cdb_direct);

1304
sense_reason_t
1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323
transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
		u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
{
	if (!sgl || !sgl_count)
		return 0;

	/*
	 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
	 * scatterlists already have been set to follow what the fabric
	 * passes for the original expected data transfer length.
	 */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		pr_warn("Rejecting SCSI DATA overflow for fabric using"
			" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
		return TCM_INVALID_CDB_FIELD;
	}

	cmd->t_data_sg = sgl;
	cmd->t_data_nents = sgl_count;
1324 1325
	cmd->t_bidi_data_sg = sgl_bidi;
	cmd->t_bidi_data_nents = sgl_bidi_count;
1326 1327 1328 1329 1330

	cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	return 0;
}

1331 1332 1333
/*
 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
 * 			 se_cmd + use pre-allocated SGL memory.
1334 1335 1336 1337 1338 1339 1340 1341 1342 1343
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
1344 1345 1346 1347
 * @sgl: struct scatterlist memory for unidirectional mapping
 * @sgl_count: scatterlist count for unidirectional mapping
 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
1348 1349
 * @sgl_prot: struct scatterlist memory protection information
 * @sgl_prot_count: scatterlist count for protection information
1350
 *
1351 1352
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
1353 1354 1355 1356
 * Returns non zero to signal active I/O shutdown failure.  All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
1357 1358
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
1359 1360
 */
int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
1362 1363
		u32 data_length, int task_attr, int data_dir, int flags,
		struct scatterlist *sgl, u32 sgl_count,
1364 1365
		struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
		struct scatterlist *sgl_prot, u32 sgl_prot_count)
1366 1367
{
	struct se_portal_group *se_tpg;
1368 1369
	sense_reason_t rc;
	int ret;
1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
	BUG_ON(in_interrupt());
	/*
	 * Initialize se_cmd for target operation.  From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
				data_length, data_dir, task_attr, sense);
1382 1383
	if (flags & TARGET_SCF_UNKNOWN_SIZE)
		se_cmd->unknown_data_length = 1;
1384 1385 1386 1387 1388 1389
	/*
	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
	 * se_sess->sess_cmd_list.  A second kref_get here is necessary
	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
	 */
1390
	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
1391 1392
	if (ret)
		return ret;
1393 1394 1395 1396 1397 1398 1399 1400
	/*
	 * Signal bidirectional data payloads to target-core
	 */
	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;
	/*
	 * Locate se_lun pointer and attach it to struct se_cmd
	 */
1401 1402 1403
	rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
	if (rc) {
		transport_send_check_condition_and_sense(se_cmd, rc, 0);
1404
		target_put_sess_cmd(se_cmd);
1405
		return 0;
1406
	}
1407 1408 1409 1410 1411 1412 1413

	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
	if (rc != 0) {
		transport_generic_request_failure(se_cmd, rc);
		return 0;
	}

1414 1415 1416 1417 1418 1419 1420
	/*
	 * Save pointers for SGLs containing protection information,
	 * if present.
	 */
	if (sgl_prot_count) {
		se_cmd->t_prot_sg = sgl_prot;
		se_cmd->t_prot_nents = sgl_prot_count;
1421
		se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
1422
	}
1423

1424 1425 1426 1427 1428 1429 1430 1431
	/*
	 * When a non zero sgl_count has been passed perform SGL passthrough
	 * mapping for pre-allocated fabric memory instead of having target
	 * core perform an internal SGL allocation..
	 */
	if (sgl_count != 0) {
		BUG_ON(!sgl);

1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452
		/*
		 * A work-around for tcm_loop as some userspace code via
		 * scsi-generic do not memset their associated read buffers,
		 * so go ahead and do that here for type non-data CDBs.  Also
		 * note that this is currently guaranteed to be a single SGL
		 * for this case by target core in target_setup_cmd_from_cdb()
		 * -> transport_generic_cmd_sequencer().
		 */
		if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
		     se_cmd->data_direction == DMA_FROM_DEVICE) {
			unsigned char *buf = NULL;

			if (sgl)
				buf = kmap(sg_page(sgl)) + sgl->offset;

			if (buf) {
				memset(buf, 0, sgl->length);
				kunmap(sg_page(sgl));
			}
		}

1453 1454 1455
		rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
				sgl_bidi, sgl_bidi_count);
		if (rc != 0) {
1456
			transport_generic_request_failure(se_cmd, rc);
1457 1458 1459
			return 0;
		}
	}
1460

1461 1462 1463 1464 1465 1466
	/*
	 * Check if we need to delay processing because of ALUA
	 * Active/NonOptimized primary access state..
	 */
	core_alua_check_nonop_delay(se_cmd);

1467
	transport_handle_cdb_direct(se_cmd);
1468
	return 0;
1469
}
1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484
EXPORT_SYMBOL(target_submit_cmd_map_sgls);

/*
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
1485 1486
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
1487 1488 1489 1490 1491 1492 1493 1494 1495 1496
 * Returns non zero to signal active I/O shutdown failure.  All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 *
 * It also assumes internal target core SGL memory allocation.
 */
int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
1498 1499 1500 1501
		u32 data_length, int task_attr, int data_dir, int flags)
{
	return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
			unpacked_lun, data_length, task_attr, data_dir,
1502
			flags, NULL, 0, NULL, 0, NULL, 0);
1503
}
1504 1505
EXPORT_SYMBOL(target_submit_cmd);
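/*
 * Illustrative call from a fabric receive path (editorial sketch; the
 * local variables shown are hypothetical):
 *
 *	se_cmd->tag = fabric_tag;
 *	rc = target_submit_cmd(se_cmd, se_sess, cdb, sense_buf, unpacked_lun,
 *			       data_len, TCM_SIMPLE_TAG, DMA_FROM_DEVICE,
 *			       TARGET_SCF_ACK_KREF);
 *
 * A non-zero return signals active I/O shutdown; all other setup errors
 * are reported back as CHECK_CONDITION via the fabric's queue_status().
 */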

1506 1507 1508 1509 1510 1511
static void target_complete_tmr_failure(struct work_struct *work)
{
	struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);

	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
	se_cmd->se_tfo->queue_tm_rsp(se_cmd);
1512 1513

	transport_cmd_check_stop_to_fabric(se_cmd);
1514 1515
}

1516 1517 1518 1519 1520 1521 1522 1523 1524 1525
/**
 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
 *                     for TMR CDBs
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @fabric_context: fabric context for TMR req
 * @tm_type: Type of TM request
1526 1527
 * @gfp: gfp type for caller
 * @tag: referenced task tag for TMR_ABORT_TASK
1528
 * @flags: submit cmd flags
1529 1530 1531 1532
 *
 * Callable from all contexts.
 **/

1533
int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u64 unpacked_lun,
1535 1536
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t gfp, unsigned int tag, int flags)
1537 1538 1539 1540 1541 1542 1543 1544
{
	struct se_portal_group *se_tpg;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);

	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			      0, DMA_NONE, TCM_SIMPLE_TAG, sense);
1546 1547 1548 1549
	/*
	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
	 * allocation failure.
	 */
1550
	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
1551 1552
	if (ret < 0)
		return -ENOMEM;
1553

1554 1555 1556
	if (tm_type == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = tag;

1557
	/* See target_submit_cmd for commentary */
1558
	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
1559 1560 1561 1562
	if (ret) {
		core_tmr_release_req(se_cmd->se_tmr_req);
		return ret;
	}
1563 1564 1565

	ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
	if (ret) {
1566 1567 1568 1569 1570 1571
		/*
		 * For callback during failure handling, push this work off
		 * to process context with TMR_LUN_DOES_NOT_EXIST status.
		 */
		INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
		schedule_work(&se_cmd->work);
1572
		return 0;
1573 1574
	}
	transport_generic_handle_tmr(se_cmd);
1575
	return 0;
1576 1577 1578
}
EXPORT_SYMBOL(target_submit_tmr);
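/*
 * Illustrative ABORT_TASK submission (editorial sketch; variables are
 * hypothetical):
 *
 *	rc = target_submit_tmr(se_cmd, se_sess, sense_buf, unpacked_lun,
 *			       fabric_tmr_ptr, TMR_ABORT_TASK, GFP_KERNEL,
 *			       ref_tag, TARGET_SCF_ACK_KREF);
 *
 * A failed LUN lookup is completed asynchronously with
 * TMR_LUN_DOES_NOT_EXIST via target_complete_tmr_failure() above.
 */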

1579
/*
1580
 * If the cmd is active, request it to be stopped and sleep until it
1581 1582
 * has completed.
 */
1583
bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
1584 1585
	__releases(&cmd->t_state_lock)
	__acquires(&cmd->t_state_lock)
1586 1587 1588
{
	bool was_active = false;

1589 1590
	if (cmd->transport_state & CMD_T_BUSY) {
		cmd->transport_state |= CMD_T_REQUEST_STOP;
1591 1592
		spin_unlock_irqrestore(&cmd->t_state_lock, *flags);

1593 1594 1595
		pr_debug("cmd %p waiting to complete\n", cmd);
		wait_for_completion(&cmd->task_stop_comp);
		pr_debug("cmd %p stopped successfully\n", cmd);
1596 1597

		spin_lock_irqsave(&cmd->t_state_lock, *flags);
1598 1599
		cmd->transport_state &= ~CMD_T_REQUEST_STOP;
		cmd->transport_state &= ~CMD_T_BUSY;
1600 1601 1602 1603 1604 1605
		was_active = true;
	}

	return was_active;
}

1606 1607 1608
/*
 * Handle SAM-esque emulation for generic transport request failures.
 */
1609 1610
void transport_generic_request_failure(struct se_cmd *cmd,
		sense_reason_t sense_reason)
1611
{
1612 1613
	int ret = 0;

1614 1615
	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx"
		" CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]);
1616
	pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n",
1617
		cmd->se_tfo->get_cmd_state(cmd),
1618
		cmd->t_state, sense_reason);
1619
	pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
1620 1621 1622
		(cmd->transport_state & CMD_T_ACTIVE) != 0,
		(cmd->transport_state & CMD_T_STOP) != 0,
		(cmd->transport_state & CMD_T_SENT) != 0);
1623 1624 1625 1626

	/*
	 * For SAM Task Attribute emulation for failed struct se_cmd
	 */
1627
	transport_complete_task_attr(cmd);
1628 1629
	/*
	 * Handle special case for COMPARE_AND_WRITE failure, where the
1630
	 * callback is expected to drop the per device ->caw_sem.
1631 1632 1633
	 */
	if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
	     cmd->transport_complete_callback)
1634
		cmd->transport_complete_callback(cmd, false);
1635

1636
	switch (sense_reason) {
1637 1638 1639 1640
	case TCM_NON_EXISTENT_LUN:
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_INVALID_CDB_FIELD:
	case TCM_INVALID_PARAMETER_LIST:
1641
	case TCM_PARAMETER_LIST_LENGTH_ERROR:
1642 1643 1644
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	case TCM_UNKNOWN_MODE_PAGE:
	case TCM_WRITE_PROTECTED:
1645
	case TCM_ADDRESS_OUT_OF_RANGE:
1646 1647 1648
	case TCM_CHECK_CONDITION_ABORT_CMD:
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
	case TCM_CHECK_CONDITION_NOT_READY:
1649 1650 1651
	case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
	case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
	case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
1652
		break;
1653 1654 1655
	case TCM_OUT_OF_RESOURCES:
		sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		break;
1656
	case TCM_RESERVATION_CONFLICT:
1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670
		/*
		 * No SENSE Data payload for this case, set SCSI Status
		 * and queue the response to $FABRIC_MOD.
		 *
		 * Uses linux/include/scsi/scsi.h SAM status codes defs
		 */
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
		/*
		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
		 * CONFLICT STATUS.
		 *
		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
		 */
1671
		if (cmd->se_sess &&
1672 1673 1674 1675 1676
		    cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) {
			target_ua_allocate_lun(cmd->se_sess->se_node_acl,
					       cmd->orig_fe_lun, 0x2C,
					ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
		}
1677
		trace_target_cmd_complete(cmd);
1678
		ret = cmd->se_tfo->queue_status(cmd);
1679
		if (ret == -EAGAIN || ret == -ENOMEM)
1680
			goto queue_full;
1681 1682
		goto check_stop;
	default:
1683
		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
1684 1685
			cmd->t_task_cdb[0], sense_reason);
		sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1686 1687
		break;
	}
1688

1689
	ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
1690 1691
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;
1692

1693 1694
check_stop:
	transport_lun_remove_cmd(cmd);
1695
	if (!transport_cmd_check_stop_to_fabric(cmd))
1696
		;
1697 1698 1699
	return;

queue_full:
1700 1701
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
1702
}
1703
EXPORT_SYMBOL(transport_generic_request_failure);
1704

1705
void __target_execute_cmd(struct se_cmd *cmd)
1706
{
1707
	sense_reason_t ret;
1708

1709 1710 1711 1712 1713 1714
	if (cmd->execute_cmd) {
		ret = cmd->execute_cmd(cmd);
		if (ret) {
			spin_lock_irq(&cmd->t_state_lock);
			cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
			spin_unlock_irq(&cmd->t_state_lock);
1715

1716 1717
			transport_generic_request_failure(cmd, ret);
		}
1718 1719 1720
	}
}

1721 1722
static int target_write_prot_action(struct se_cmd *cmd)
{
1723
	u32 sectors;
1724 1725 1726 1727 1728 1729 1730 1731 1732 1733
	/*
	 * Perform WRITE_INSERT of PI using software emulation when backend
	 * device has PI enabled, if the transport has not already generated
	 * PI using hardware WRITE_INSERT offload.
	 */
	switch (cmd->prot_op) {
	case TARGET_PROT_DOUT_INSERT:
		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
			sbc_dif_generate(cmd);
		break;
1734 1735 1736 1737 1738
	case TARGET_PROT_DOUT_STRIP:
		if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
			break;

		sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
1739 1740
		cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
					     sectors, 0, cmd->t_prot_sg, 0);
1741 1742
		if (unlikely(cmd->pi_err)) {
			spin_lock_irq(&cmd->t_state_lock);
1743
			cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
1744 1745 1746 1747 1748
			spin_unlock_irq(&cmd->t_state_lock);
			transport_generic_request_failure(cmd, cmd->pi_err);
			return -1;
		}
		break;
1749 1750 1751 1752 1753 1754 1755
	default:
		break;
	}

	return 0;
}

1756
static bool target_handle_task_attr(struct se_cmd *cmd)
1757 1758 1759
{
	struct se_device *dev = cmd->se_dev;

1760
	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1761
		return false;
1762

1763
	/*
	 * Check for the existence of HEAD_OF_QUEUE, and if true allow the
	 * passed struct se_cmd to go to the head of the queue.
	 */
1767
	switch (cmd->sam_task_attr) {
C
Christoph Hellwig 已提交
1768
	case TCM_HEAD_TAG:
1769 1770
		pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
			 cmd->t_task_cdb[0]);
1771
		return false;
C
Christoph Hellwig 已提交
1772
	case TCM_ORDERED_TAG:
1773
		atomic_inc_mb(&dev->dev_ordered_sync);
1774

1775 1776
		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
			 cmd->t_task_cdb[0]);
1777

1778
		/*
1779 1780
		 * Execute an ORDERED command if no other older commands
		 * exist that need to be completed first.
1781
		 */
1782
		if (!atomic_read(&dev->simple_cmds))
1783
			return false;
1784 1785
		break;
	default:
1786 1787 1788
		/*
		 * For SIMPLE and UNTAGGED Task Attribute commands
		 */
1789
		atomic_inc_mb(&dev->simple_cmds);
1790
		break;
1791
	}
1792

1793 1794
	if (atomic_read(&dev->dev_ordered_sync) == 0)
		return false;
1795

1796 1797 1798 1799
	spin_lock(&dev->delayed_cmd_lock);
	list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
	spin_unlock(&dev->delayed_cmd_lock);

1800 1801
	pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD listn",
		cmd->t_task_cdb[0], cmd->sam_task_attr);
1802 1803 1804 1805 1806 1807 1808 1809
	return true;
}

void target_execute_cmd(struct se_cmd *cmd)
{
	/*
	 * If the received CDB has already been aborted stop processing it here.
	 */
	if (transport_check_aborted_status(cmd, 1))
		return;

	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
			__func__, __LINE__, cmd->tag);

		spin_unlock_irq(&cmd->t_state_lock);
		complete_all(&cmd->t_transport_stop_comp);
		return;
	}

	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	if (target_write_prot_action(cmd))
		return;

	if (target_handle_task_attr(cmd)) {
		spin_lock_irq(&cmd->t_state_lock);
		cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT);
		spin_unlock_irq(&cmd->t_state_lock);
		return;
	}

	__target_execute_cmd(cmd);
}
EXPORT_SYMBOL(target_execute_cmd);

/*
 * Process all commands up to the last received ORDERED task attribute which
 * requires another blocking boundary
 */
static void target_restart_delayed_cmds(struct se_device *dev)
{
	for (;;) {
		struct se_cmd *cmd;

		spin_lock(&dev->delayed_cmd_lock);
		if (list_empty(&dev->delayed_cmd_list)) {
			spin_unlock(&dev->delayed_cmd_lock);
			break;
		}

		cmd = list_entry(dev->delayed_cmd_list.next,
				 struct se_cmd, se_delayed_node);
		list_del(&cmd->se_delayed_node);
		spin_unlock(&dev->delayed_cmd_lock);

		__target_execute_cmd(cmd);

		if (cmd->sam_task_attr == TCM_ORDERED_TAG)
			break;
	}
}

/*
 * Called from I/O completion to determine which dormant/delayed
 * and ordered cmds need to have their tasks added to the execution queue.
 */
static void transport_complete_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return;

	if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
		atomic_dec_mb(&dev->simple_cmds);
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev->dev_cur_ordered_id: %u for SIMPLE\n",
			 dev->dev_cur_ordered_id);
	} else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
			 dev->dev_cur_ordered_id);
	} else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
		atomic_dec_mb(&dev->dev_ordered_sync);

		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
			 dev->dev_cur_ordered_id);
	}

	target_restart_delayed_cmds(dev);
}

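/*
 * Retry a previously queue-full response: re-send the pending status or
 * data-in payload to the fabric, and re-queue on the QUEUE_FULL list if
 * the fabric still cannot accept it.
 */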
static void transport_complete_qf(struct se_cmd *cmd)
{
	int ret = 0;

	transport_complete_task_attr(cmd);

	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		goto out;
	}

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_data_in(cmd);
		break;
	case DMA_TO_DEVICE:
		if (cmd->se_cmd_flags & SCF_BIDI) {
			ret = cmd->se_tfo->queue_data_in(cmd);
			break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		break;
	default:
		break;
	}

out:
	if (ret < 0) {
		transport_handle_queue_full(cmd, cmd->se_dev);
		return;
	}
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
}

static void transport_handle_queue_full(
	struct se_cmd *cmd,
	struct se_device *dev)
{
	spin_lock_irq(&dev->qf_cmd_lock);
	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
	atomic_inc_mb(&dev->dev_qf_count);
	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);

	schedule_work(&cmd->se_dev->qf_work_queue);
}

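/*
 * Emulate READ protection information handling in software when the fabric
 * does not offload it: verify PI for DIN_STRIP, or generate it for
 * DIN_INSERT.  Returns true when a PI verification error was detected.
 */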
static bool target_read_prot_action(struct se_cmd *cmd)
{
	switch (cmd->prot_op) {
	case TARGET_PROT_DIN_STRIP:
		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
			u32 sectors = cmd->data_length >>
				  ilog2(cmd->se_dev->dev_attrib.block_size);

			cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
						     sectors, 0, cmd->t_prot_sg,
						     0);
			if (cmd->pi_err)
				return true;
		}
		break;
	case TARGET_PROT_DIN_INSERT:
		if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT)
			break;

		sbc_dif_generate(cmd);
		break;
	default:
		break;
	}

	return false;
}

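/*
 * Workqueue handler for successful command completion: finish task
 * attribute accounting, emit any deferred sense data, run the
 * transport_complete_callback() (COMPARE_AND_WRITE, XDWRITE_READ_10), and
 * queue the response payload and/or status back to the fabric.
 */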
static void target_complete_ok_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	int ret;

	/*
	 * Check if we need to move delayed/dormant tasks from cmds on the
	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
	 * Attribute.
	 */
	transport_complete_task_attr(cmd);

	/*
	 * Check to schedule QUEUE_FULL work, or execute an existing
	 * cmd->transport_qf_callback()
	 */
	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
		schedule_work(&cmd->se_dev->qf_work_queue);

	/*
	 * Check if we need to send a sense buffer from
	 * the struct se_cmd in question.
	 */
	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		WARN_ON(!cmd->scsi_status);
		ret = transport_send_check_condition_and_sense(
					cmd, 0, 1);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;

		transport_lun_remove_cmd(cmd);
		transport_cmd_check_stop_to_fabric(cmd);
		return;
	}
	/*
	 * Check for a callback, used amongst other things by
	 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation.
	 */
	if (cmd->transport_complete_callback) {
		sense_reason_t rc;

		rc = cmd->transport_complete_callback(cmd, true);
		if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
			if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
			    !cmd->data_length)
				goto queue_rsp;

			return;
		} else if (rc) {
			ret = transport_send_check_condition_and_sense(cmd,
						rc, 0);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop_to_fabric(cmd);
			return;
		}
	}

queue_rsp:
	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		atomic_long_add(cmd->data_length,
				&cmd->se_lun->lun_stats.tx_data_octets);
		/*
		 * Perform READ_STRIP of PI using software emulation when
		 * backend had PI enabled, if the transport will not be
		 * performing hardware READ_STRIP offload.
		 */
		if (target_read_prot_action(cmd)) {
			ret = transport_send_check_condition_and_sense(cmd,
						cmd->pi_err, 0);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop_to_fabric(cmd);
			return;
		}

		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_data_in(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	case DMA_TO_DEVICE:
		atomic_long_add(cmd->data_length,
				&cmd->se_lun->lun_stats.rx_data_octets);
		/*
		 * Check if we need to send READ payload for BIDI-COMMAND
		 */
		if (cmd->se_cmd_flags & SCF_BIDI) {
			atomic_long_add(cmd->data_length,
					&cmd->se_lun->lun_stats.tx_data_octets);
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;
			break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	default:
		break;
	}

	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
		" data_direction: %d\n", cmd, cmd->data_direction);
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}

static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, nents, count)
		__free_page(sg_page(sg));

	kfree(sgl);
}

static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
{
	/*
	 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE
	 * emulation, and free + reset pointers if necessary..
	 */
	if (!cmd->t_data_sg_orig)
		return;

	kfree(cmd->t_data_sg);
	cmd->t_data_sg = cmd->t_data_sg_orig;
	cmd->t_data_sg_orig = NULL;
	cmd->t_data_nents = cmd->t_data_nents_orig;
	cmd->t_data_nents_orig = 0;
}

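/*
 * Free all data, BIDI and protection information scatterlists attached to
 * a command, honouring the SG_TO_MEM_NOALLOC cases where the memory is
 * owned by the fabric rather than the target core.
 */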
static inline void transport_free_pages(struct se_cmd *cmd)
{
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
		transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
		cmd->t_prot_sg = NULL;
		cmd->t_prot_nents = 0;
	}

	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
		/*
		 * Release special case READ buffer payload required for
		 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
		 */
		if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
			transport_free_sgl(cmd->t_bidi_data_sg,
					   cmd->t_bidi_data_nents);
			cmd->t_bidi_data_sg = NULL;
			cmd->t_bidi_data_nents = 0;
		}
		transport_reset_sgl_orig(cmd);
		return;
	}
	transport_reset_sgl_orig(cmd);

	transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
	cmd->t_data_sg = NULL;
	cmd->t_data_nents = 0;

	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
	cmd->t_bidi_data_sg = NULL;
	cmd->t_bidi_data_nents = 0;
}

/**
 * transport_release_cmd - free a command
 * @cmd:       command to free
 *
 * This routine unconditionally frees a command, and reference counting
 * or list removal must be done in the caller.
 */
static int transport_release_cmd(struct se_cmd *cmd)
{
	BUG_ON(!cmd->se_tfo);

	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
		core_tmr_release_req(cmd->se_tmr_req);
	if (cmd->t_task_cdb != cmd->__t_task_cdb)
		kfree(cmd->t_task_cdb);
	/*
	 * If this cmd has been setup with target_get_sess_cmd(), drop
	 * the kref and call ->release_cmd() in kref callback.
	 */
	return target_put_sess_cmd(cmd);
}

/**
 * transport_put_cmd - release a reference to a command
 * @cmd:       command to release
 *
 * This routine releases our reference to the command and frees it if possible.
 */
static int transport_put_cmd(struct se_cmd *cmd)
{
	transport_free_pages(cmd);
	return transport_release_cmd(cmd);
}

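/*
 * Map a command's data scatterlist into a virtually contiguous kernel
 * address: kmap() for the single-page case, vmap() otherwise.  The mapping
 * must be released with transport_kunmap_data_sg().
 */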
void *transport_kmap_data_sg(struct se_cmd *cmd)
{
	struct scatterlist *sg = cmd->t_data_sg;
	struct page **pages;
	int i;

	/*
	 * We need to take into account a possible offset here for fabrics like
	 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
	 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
	 */
	if (!cmd->t_data_nents)
		return NULL;

	BUG_ON(!sg);
	if (cmd->t_data_nents == 1)
		return kmap(sg_page(sg)) + sg->offset;

	/* >1 page. use vmap */
	pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
	if (!pages)
		return NULL;

	/* convert sg[] to pages[] */
	for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
		pages[i] = sg_page(sg);
	}

	cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
	kfree(pages);
	if (!cmd->t_data_vmap)
		return NULL;

	return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
}
EXPORT_SYMBOL(transport_kmap_data_sg);

void transport_kunmap_data_sg(struct se_cmd *cmd)
{
	if (!cmd->t_data_nents) {
		return;
	} else if (cmd->t_data_nents == 1) {
		kunmap(sg_page(cmd->t_data_sg));
		return;
	}

	vunmap(cmd->t_data_vmap);
	cmd->t_data_vmap = NULL;
}
EXPORT_SYMBOL(transport_kunmap_data_sg);

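/*
 * Allocate a scatterlist large enough to hold @length bytes, backed by
 * order-zero pages, optionally zeroed.  Used for the data, BIDI and
 * protection buffers of a command.
 */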
int
target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
		 bool zero_page)
{
	struct scatterlist *sg;
	struct page *page;
	gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0;
	unsigned int nent;
	int i = 0;

	nent = DIV_ROUND_UP(length, PAGE_SIZE);
	sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL);
	if (!sg)
		return -ENOMEM;

	sg_init_table(sg, nent);

	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);
		page = alloc_page(GFP_KERNEL | zero_flag);
		if (!page)
			goto out;

		sg_set_page(&sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}
	*sgl = sg;
	*nents = nent;
	return 0;

out:
	while (i > 0) {
		i--;
		__free_page(sg_page(&sg[i]));
	}
	kfree(sg);
	return -ENOMEM;
}

/*
 * Allocate any required resources to execute the command.  For writes we
 * might not have the payload yet, so notify the fabric via a call to
 * ->write_pending instead. Otherwise place it on the execution queue.
 */
sense_reason_t
transport_generic_new_cmd(struct se_cmd *cmd)
{
	int ret = 0;
	bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);

	if (cmd->prot_op != TARGET_PROT_NORMAL &&
	    !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
		ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
				       cmd->prot_length, true);
		if (ret < 0)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	/*
	 * Determine if the TCM fabric module has already allocated physical
	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
	 * beforehand.
	 */
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
	    cmd->data_length) {

		if ((cmd->se_cmd_flags & SCF_BIDI) ||
		    (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
			u32 bidi_length;

			if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
				bidi_length = cmd->t_task_nolb *
					      cmd->se_dev->dev_attrib.block_size;
			else
				bidi_length = cmd->data_length;

			ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
					       &cmd->t_bidi_data_nents,
					       bidi_length, zero_flag);
			if (ret < 0)
				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
				       cmd->data_length, zero_flag);
		if (ret < 0)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	} else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
		    cmd->data_length) {
		/*
		 * Special case for COMPARE_AND_WRITE with fabrics
		 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
		 */
		u32 caw_length = cmd->t_task_nolb *
				 cmd->se_dev->dev_attrib.block_size;

		ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
				       &cmd->t_bidi_data_nents,
				       caw_length, zero_flag);
		if (ret < 0)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * If this command is not a write we can execute it right here,
	 * for write buffers we need to notify the fabric driver first
	 * and let it call back once the write buffers are ready.
	 */
	target_add_to_state_list(cmd);
	if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
		target_execute_cmd(cmd);
		return 0;
	}
	transport_cmd_check_stop(cmd, false, true);

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;

	/* fabric drivers should only return -EAGAIN or -ENOMEM as error */
	WARN_ON(ret);

	return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

queue_full:
	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
	transport_handle_queue_full(cmd, cmd->se_dev);
	return 0;
}
EXPORT_SYMBOL(transport_generic_new_cmd);

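/*
 * Retry a ->write_pending() call that previously failed with -EAGAIN or
 * -ENOMEM; if it fails again the command goes back on the QUEUE_FULL list.
 */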
static void transport_write_pending_qf(struct se_cmd *cmd)
{
	int ret;

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM) {
		pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
			 cmd);
		transport_handle_queue_full(cmd, cmd->se_dev);
	}
}

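/*
 * Release a command from fabric context, optionally waiting for any
 * outstanding backend processing to complete before dropping the
 * descriptor's references.
 */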
int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
	unsigned long flags;
	int ret = 0;

	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
			transport_wait_for_tasks(cmd);

		ret = transport_release_cmd(cmd);
	} else {
		if (wait_for_tasks)
			transport_wait_for_tasks(cmd);
		/*
		 * Handle WRITE failure case where transport_generic_new_cmd()
		 * has already added se_cmd to state_list, but fabric has
		 * failed command before I/O submission.
		 */
		if (cmd->state_active) {
			spin_lock_irqsave(&cmd->t_state_lock, flags);
			target_remove_from_state_list(cmd);
			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		}

		if (cmd->se_lun)
			transport_lun_remove_cmd(cmd);

		ret = transport_put_cmd(cmd);
	}
	return ret;
}
EXPORT_SYMBOL(transport_generic_free_cmd);

/* target_get_sess_cmd - Add command to active ->sess_cmd_list
 * @se_cmd:	command descriptor to add
 * @ack_kref:	Signal that fabric will perform an ack target_put_sess_cmd()
 */
int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
{
	struct se_session *se_sess = se_cmd->se_sess;
	unsigned long flags;
	int ret = 0;

	/*
	 * Add a second kref if the fabric caller is expecting to handle
	 * fabric acknowledgement that requires two target_put_sess_cmd()
	 * invocations before se_cmd descriptor release.
	 */
	if (ack_kref)
		kref_get(&se_cmd->cmd_kref);

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	if (se_sess->sess_tearing_down) {
		ret = -ESHUTDOWN;
		goto out;
	}
	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
out:
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

	if (ret && ack_kref)
		target_put_sess_cmd(se_cmd);

	return ret;
}
EXPORT_SYMBOL(target_get_sess_cmd);

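/*
 * kref release callback for cmd_kref: drops the command from the session's
 * sess_cmd_list, wakes up a waiting session shutdown if one is pending, and
 * hands the descriptor back to the fabric via ->release_cmd().
 */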
static void target_release_cmd_kref(struct kref *kref)
		__releases(&se_cmd->se_sess->sess_cmd_lock)
{
	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
	struct se_session *se_sess = se_cmd->se_sess;

	if (list_empty(&se_cmd->se_cmd_list)) {
		spin_unlock(&se_sess->sess_cmd_lock);
		se_cmd->se_tfo->release_cmd(se_cmd);
		return;
	}
	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
		spin_unlock(&se_sess->sess_cmd_lock);
		complete(&se_cmd->cmd_wait_comp);
		return;
	}
	list_del(&se_cmd->se_cmd_list);
	spin_unlock(&se_sess->sess_cmd_lock);

	se_cmd->se_tfo->release_cmd(se_cmd);
}

/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
 * @se_cmd:	command descriptor to drop
 */
int target_put_sess_cmd(struct se_cmd *se_cmd)
{
	struct se_session *se_sess = se_cmd->se_sess;

	if (!se_sess) {
		se_cmd->se_tfo->release_cmd(se_cmd);
		return 1;
	}
	return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
			&se_sess->sess_cmd_lock);
}
EXPORT_SYMBOL(target_put_sess_cmd);

/* target_sess_cmd_list_set_waiting - Flag all commands in
 *         sess_cmd_list to complete cmd_wait_comp.  Set
 *         sess_tearing_down so no more commands are queued.
 * @se_sess:	session to flag
 */
void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
{
	struct se_cmd *se_cmd;
	unsigned long flags;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	if (se_sess->sess_tearing_down) {
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		return;
	}
	se_sess->sess_tearing_down = 1;
	list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);

	list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
		se_cmd->cmd_wait_set = 1;

	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);

/* target_wait_for_sess_cmds - Wait for outstanding descriptors
 * @se_sess:    session to wait for active I/O
 */
void target_wait_for_sess_cmds(struct se_session *se_sess)
{
	struct se_cmd *se_cmd, *tmp_cmd;
	unsigned long flags;

	list_for_each_entry_safe(se_cmd, tmp_cmd,
				&se_sess->sess_wait_list, se_cmd_list) {
		list_del(&se_cmd->se_cmd_list);

		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
			" %d\n", se_cmd, se_cmd->t_state,
			se_cmd->se_tfo->get_cmd_state(se_cmd));

		wait_for_completion(&se_cmd->cmd_wait_comp);
		pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
			" fabric state: %d\n", se_cmd, se_cmd->t_state,
			se_cmd->se_tfo->get_cmd_state(se_cmd));

		se_cmd->se_tfo->release_cmd(se_cmd);
	}

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	WARN_ON(!list_empty(&se_sess->sess_cmd_list));
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);

void transport_clear_lun_ref(struct se_lun *lun)
{
	percpu_ref_kill(&lun->lun_ref);
	wait_for_completion(&lun->lun_ref_comp);
}

/**
 * transport_wait_for_tasks - wait for completion to occur
 * @cmd:	command to wait
 *
 * Called from frontend fabric context to wait for storage engine
 * to pause and/or release frontend generated struct se_cmd.
 */
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	if (!(cmd->transport_state & CMD_T_ACTIVE)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	cmd->transport_state |= CMD_T_STOP;

	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n",
		cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	wait_for_completion(&cmd->t_transport_stop_comp);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);

	pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n",
		cmd->tag);

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return true;
}
EXPORT_SYMBOL(transport_wait_for_tasks);

struct sense_info {
	u8 key;
	u8 asc;
	u8 ascq;
	bool add_sector_info;
};

static const struct sense_info sense_info_table[] = {
	[TCM_NO_SENSE] = {
		.key = NOT_READY
	},
	[TCM_NON_EXISTENT_LUN] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */
	},
	[TCM_UNSUPPORTED_SCSI_OPCODE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x20, /* INVALID COMMAND OPERATION CODE */
	},
	[TCM_SECTOR_COUNT_TOO_MANY] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x20, /* INVALID COMMAND OPERATION CODE */
	},
	[TCM_UNKNOWN_MODE_PAGE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x24, /* INVALID FIELD IN CDB */
	},
	[TCM_CHECK_CONDITION_ABORT_CMD] = {
		.key = ABORTED_COMMAND,
		.asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */
		.ascq = 0x03,
	},
	[TCM_INCORRECT_AMOUNT_OF_DATA] = {
		.key = ABORTED_COMMAND,
		.asc = 0x0c, /* WRITE ERROR */
		.ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */
	},
	[TCM_INVALID_CDB_FIELD] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x24, /* INVALID FIELD IN CDB */
	},
	[TCM_INVALID_PARAMETER_LIST] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
	},
	[TCM_PARAMETER_LIST_LENGTH_ERROR] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
	},
	[TCM_UNEXPECTED_UNSOLICITED_DATA] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x0c, /* WRITE ERROR */
		.ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */
	},
	[TCM_SERVICE_CRC_ERROR] = {
		.key = ABORTED_COMMAND,
		.asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */
		.ascq = 0x05, /* N/A */
	},
	[TCM_SNACK_REJECTED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x11, /* READ ERROR */
		.ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */
	},
	[TCM_WRITE_PROTECTED] = {
		.key = DATA_PROTECT,
		.asc = 0x27, /* WRITE PROTECTED */
	},
	[TCM_ADDRESS_OUT_OF_RANGE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
	},
	[TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
		.key = UNIT_ATTENTION,
	},
	[TCM_CHECK_CONDITION_NOT_READY] = {
		.key = NOT_READY,
	},
	[TCM_MISCOMPARE_VERIFY] = {
		.key = MISCOMPARE,
		.asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
		.ascq = 0x00,
	},
	[TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x10,
		.ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
		.add_sector_info = true,
	},
	[TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x10,
		.ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
		.add_sector_info = true,
	},
	[TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x10,
		.ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
		.add_sector_info = true,
	},
	[TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
		/*
		 * Returning ILLEGAL REQUEST would cause immediate IO errors on
		 * Solaris initiators.  Returning NOT READY instead means the
		 * operations will be retried a finite number of times and we
		 * can survive intermittent errors.
		 */
		.key = NOT_READY,
		.asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
	},
};

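/*
 * Fill the command's sense buffer for the given sense_reason_t using the
 * sense_info_table above, falling back to LOGICAL UNIT COMMUNICATION
 * FAILURE for unknown reasons, and append sector information when the
 * entry requests it.
 */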
static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
{
	const struct sense_info *si;
	u8 *buffer = cmd->sense_buffer;
	int r = (__force int)reason;
	u8 asc, ascq;
	bool desc_format = target_sense_desc_format(cmd->se_dev);

	if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
		si = &sense_info_table[r];
	else
		si = &sense_info_table[(__force int)
				       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];

	if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
		WARN_ON_ONCE(asc == 0);
	} else if (si->asc == 0) {
		WARN_ON_ONCE(cmd->scsi_asc == 0);
		asc = cmd->scsi_asc;
		ascq = cmd->scsi_ascq;
	} else {
		asc = si->asc;
		ascq = si->ascq;
	}

	scsi_build_sense_buffer(desc_format, buffer, si->key, asc, ascq);
	if (si->add_sector_info)
		return scsi_set_sense_information(buffer,
						  cmd->scsi_sense_length,
						  cmd->bad_sector);

	return 0;
}

int
transport_send_check_condition_and_sense(struct se_cmd *cmd,
		sense_reason_t reason, int from_transport)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}
	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (!from_transport) {
		int rc;

		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
		cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
		cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
		rc = translate_sense_reason(cmd, reason);
		if (rc)
			return rc;
	}

	trace_target_cmd_complete(cmd);
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);

int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
	if (!(cmd->transport_state & CMD_T_ABORTED))
		return 0;

	/*
	 * If cmd has been aborted but either no status is to be sent or it has
	 * already been sent, just return
	 */
	if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
		return 1;

	pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n",
		 cmd->t_task_cdb[0], cmd->tag);

	cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
	trace_target_cmd_complete(cmd);
	cmd->se_tfo->queue_status(cmd);

	return 1;
}
EXPORT_SYMBOL(transport_check_aborted_status);

void transport_send_task_abort(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * If there are still expected incoming fabric WRITEs, we wait
	 * until they have completed before sending a TASK_ABORTED
	 * response.  This response with TASK_ABORTED status will be
	 * queued back to fabric module by transport_check_aborted_status().
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
			cmd->transport_state |= CMD_T_ABORTED;
			cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
			return;
		}
	}
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;

	transport_lun_remove_cmd(cmd);

	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
		 cmd->t_task_cdb[0], cmd->tag);

	trace_target_cmd_complete(cmd);
	cmd->se_tfo->queue_status(cmd);
}

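/*
 * Workqueue handler for task management requests: dispatch the TMR to the
 * appropriate core_tmr_*() handler, fill in tmr->response, and queue the
 * TM response back to the fabric.
 */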
static void target_tmr_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	struct se_device *dev = cmd->se_dev;
	struct se_tmr_req *tmr = cmd->se_tmr_req;
	int ret;

	switch (tmr->function) {
	case TMR_ABORT_TASK:
		core_tmr_abort_task(dev, tmr, cmd->se_sess);
		break;
	case TMR_ABORT_TASK_SET:
	case TMR_CLEAR_ACA:
	case TMR_CLEAR_TASK_SET:
		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		break;
	case TMR_LUN_RESET:
		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
					 TMR_FUNCTION_REJECTED;
		if (tmr->response == TMR_FUNCTION_COMPLETE) {
			target_ua_allocate_lun(cmd->se_sess->se_node_acl,
					       cmd->orig_fe_lun, 0x29,
					       ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
		}
		break;
	case TMR_TARGET_WARM_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_COLD_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	default:
		pr_err("Unknown TMR function: 0x%02x.\n",
				tmr->function);
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	}

	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
	cmd->se_tfo->queue_tm_rsp(cmd);

	transport_cmd_check_stop_to_fabric(cmd);
}

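/*
 * Entry point for fabric modules to hand a received task management
 * request to the target core; the TMR is processed asynchronously on the
 * device's tmr_wq via target_tmr_work().
 */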
int transport_generic_handle_tmr(
	struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state |= CMD_T_ACTIVE;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	INIT_WORK(&cmd->work, target_tmr_work);
	queue_work(cmd->se_dev->tmr_wq, &cmd->work);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);

bool
target_check_wce(struct se_device *dev)
{
	bool wce = false;

	if (dev->transport->get_write_cache)
		wce = dev->transport->get_write_cache(dev);
	else if (dev->dev_attrib.emulate_write_cache > 0)
		wce = true;

	return wce;
}

bool
target_check_fua(struct se_device *dev)
{
	return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
}