/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static int sub_api_initialized;

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;

static int transport_generic_write_pending(struct se_cmd *);
static int transport_processing_thread(void *param);
static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev);
static void transport_free_dev_tasks(struct se_cmd *cmd);
static int transport_generic_get_mem(struct se_cmd *cmd);
static void transport_put_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session"
				" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration"
				" failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
				" failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
				"cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
			"t10_alua_tg_pt_gp_mem_cache",
			sizeof(struct t10_alua_tg_pt_gp_member),
			__alignof__(struct t10_alua_tg_pt_gp_member),
			0, NULL);
	if (!t10_alua_tg_pt_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"mem_t failed\n");
		goto out_free_tg_pt_gp_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_tg_pt_gp_mem_cache;

	return 0;

out_free_tg_pt_gp_mem_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}
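
/*
 * Example (as done for new struct se_device setup later in this file):
 *
 *	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
 */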

static void transport_init_queue_obj(struct se_queue_obj *qobj)
{
	atomic_set(&qobj->queue_cnt, 0);
	INIT_LIST_HEAD(&qobj->qobj_list);
	init_waitqueue_head(&qobj->thread_wq);
	spin_lock_init(&qobj->cmd_queue_lock);
}

void transport_subsystem_check_init(void)
{
	int ret;

	if (sub_api_initialized)
		return;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = request_module("target_core_stgt");
	if (ret != 0)
		pr_err("Unable to load target_core_stgt\n");

	sub_api_initialized = 1;
	return;
}

struct se_session *transport_init_session(void)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from"
				" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	INIT_LIST_HEAD(&se_sess->sess_wait_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
	kref_init(&se_sess->sess_kref);

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);
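
/*
 * Illustrative sketch (hypothetical fabric, not in-tree code): a fabric
 * module typically allocates the session and then registers it against
 * its TPG once the I_T nexus is established:
 *
 *	struct se_session *se_sess = transport_init_session();
 *
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 *	transport_register_session(se_tpg, se_nacl, se_sess, fabric_ptr);
 */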

/*
 * Called with struct se_portal_group->session_lock held, taken via
 * spin_lock_irqsave().
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session.
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}
		kref_get(&se_nacl->acl_kref);

		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned long flags;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);

static void target_release_session(struct kref *kref)
{
	struct se_session *se_sess = container_of(kref,
			struct se_session, sess_kref);
	struct se_portal_group *se_tpg = se_sess->se_tpg;

	se_tpg->se_tpg_tfo->close_session(se_sess);
}

void target_get_session(struct se_session *se_sess)
{
	kref_get(&se_sess->sess_kref);
}
EXPORT_SYMBOL(target_get_session);

int target_put_session(struct se_session *se_sess)
{
	return kref_put(&se_sess->sess_kref, target_release_session);
}
EXPORT_SYMBOL(target_put_session);
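
/*
 * Example (illustrative): fabrics pair these around asynchronous
 * per-session work, so that se_tpg_tfo->close_session() only runs once
 * the final sess_kref reference is dropped:
 *
 *	target_get_session(se_sess);
 *	...
 *	target_put_session(se_sess);
 */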

static void target_complete_nacl(struct kref *kref)
{
	struct se_node_acl *nacl = container_of(kref,
				struct se_node_acl, acl_kref);

	complete(&nacl->acl_free_comp);
}

void target_put_nacl(struct se_node_acl *nacl)
{
	kref_put(&nacl->acl_kref, target_complete_nacl);
}

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		if (se_nacl->acl_stop == 0)
			list_del(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	struct target_core_fabric_ops *se_tfo;
	struct se_node_acl *se_nacl;
	unsigned long flags;
	bool comp_nacl = true;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}
	se_tfo = se_tpg->se_tpg_tfo;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	/*
	 * Determine if we need to do extra work for this initiator node's
	 * struct se_node_acl if it had been previously dynamically generated.
	 */
	se_nacl = se_sess->se_node_acl;

	spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
	if (se_nacl && se_nacl->dynamic_node_acl) {
		if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
			list_del(&se_nacl->acl_list);
			se_tpg->num_node_acls--;
			spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
			core_tpg_wait_for_nacl_pr_ref(se_nacl);
			core_free_device_list_for_node(se_nacl, se_tpg);
			se_tfo->tpg_release_fabric_acl(se_tpg, se_nacl);

			comp_nacl = false;
			spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
		}
	}
	spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);

	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		se_tpg->se_tpg_tfo->get_fabric_name());
	/*
	 * If the last kref is being dropped now for an explicit NodeACL,
	 * wake the sleeping ->acl_free_comp caller so the configfs
	 * se_node_acl->acl_group removal context can proceed.
	 */
	if (se_nacl && comp_nacl == true)
		target_put_nacl(se_nacl);

	transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);

/*
 * Called with cmd->t_state_lock held.
 */
static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_task *task;
	unsigned long flags;

	if (!dev)
		return;

	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		if (task->task_flags & TF_ACTIVE)
			continue;

		spin_lock_irqsave(&dev->execute_task_lock, flags);
		if (task->t_state_active) {
			pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
				cmd->se_tfo->get_task_tag(cmd), dev, task);

			list_del(&task->t_state_list);
			atomic_dec(&cmd->t_task_cdbs_ex_left);
			task->t_state_active = false;
		}
		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
	}

}

/*	transport_cmd_check_stop():
 *
 *	'transport_off = 1' determines if CMD_T_ACTIVE should be cleared.
 *	'transport_off = 2' determines if task_dev_state should be removed.
 *
 *	A non-zero u8 t_state sets cmd->t_state.
 *	Returns 1 when command is stopped, else 0.
 */
static int transport_cmd_check_stop(
	struct se_cmd *cmd,
	int transport_off,
	u8 t_state)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	/*
	 * Determine if the IOCTL context caller is requesting the stopping of
	 * this command for LUN shutdown purposes.
	 */
	if (cmd->transport_state & CMD_T_LUN_STOP) {
		pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
			__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));

		cmd->transport_state &= ~CMD_T_ACTIVE;
		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->transport_lun_stop_comp);
		return 1;
	}
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
			__func__, __LINE__,
			cmd->se_tfo->get_task_tag(cmd));

		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);

		/*
		 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
		 * to FE.
		 */
		if (transport_off == 2)
			cmd->se_lun = NULL;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->t_transport_stop_comp);
		return 1;
	}
	if (transport_off) {
		cmd->transport_state &= ~CMD_T_ACTIVE;
		if (transport_off == 2) {
			transport_all_task_dev_remove_state(cmd);
			/*
			 * Clear struct se_cmd->se_lun before the transport_off == 2
			 * handoff to fabric module.
			 */
			cmd->se_lun = NULL;
			/*
			 * Some fabric modules like tcm_loop can release
			 * Some fabric modules like tcm_loop can release their
			 * internally allocated I/O reference and struct se_cmd now.
			 *
			 * Fabric modules are expected to return '1' here if the
			 * se_cmd being passed is released at this point,
			 * or zero if not being released.
			 */
			if (cmd->se_tfo->check_stop_free != NULL) {
				spin_unlock_irqrestore(
					&cmd->t_state_lock, flags);

				return cmd->se_tfo->check_stop_free(cmd);
			}
		}
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		return 0;
	} else if (t_state)
		cmd->t_state = t_state;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return 0;
}

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	return transport_cmd_check_stop(cmd, 2, 0);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;
	unsigned long flags;

	if (!lun)
		return;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		transport_all_task_dev_remove_state(cmd);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
	if (!list_empty(&cmd->se_lun_node))
		list_del_init(&cmd->se_lun_node);
	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
}

void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
		transport_lun_remove_cmd(cmd);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
	if (remove) {
		transport_remove_cmd_from_queue(cmd);
		transport_put_cmd(cmd);
	}
}

static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
		bool at_head)
{
	struct se_device *dev = cmd->se_dev;
	struct se_queue_obj *qobj = &dev->dev_queue_obj;
	unsigned long flags;

	if (t_state) {
		spin_lock_irqsave(&cmd->t_state_lock, flags);
		cmd->t_state = t_state;
		cmd->transport_state |= CMD_T_ACTIVE;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	}

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);

	/* If the cmd is already on the list, remove it before we add it */
	if (!list_empty(&cmd->se_queue_node))
		list_del(&cmd->se_queue_node);
	else
		atomic_inc(&qobj->queue_cnt);

	if (at_head)
		list_add(&cmd->se_queue_node, &qobj->qobj_list);
	else
		list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
	cmd->transport_state |= CMD_T_QUEUED;
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	wake_up_interruptible(&qobj->thread_wq);
}

static struct se_cmd *
transport_get_cmd_from_queue(struct se_queue_obj *qobj)
{
	struct se_cmd *cmd;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (list_empty(&qobj->qobj_list)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return NULL;
	}
	cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);

	cmd->transport_state &= ~CMD_T_QUEUED;
	list_del_init(&cmd->se_queue_node);
	atomic_dec(&qobj->queue_cnt);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	return cmd;
}

static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
{
	struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (!(cmd->transport_state & CMD_T_QUEUED)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return;
	}
	cmd->transport_state &= ~CMD_T_QUEUED;
	atomic_dec(&qobj->queue_cnt);
	list_del_init(&cmd->se_queue_node);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
}

/*
 * Completion function used by TCM subsystem plugins (such as FILEIO)
 * for queueing up response from struct se_subsystem_api->do_task()
 */
void transport_complete_sync_cache(struct se_cmd *cmd, int good)
{
	struct se_task *task = list_entry(cmd->t_task_list.next,
				struct se_task, t_list);

	if (good) {
		cmd->scsi_status = SAM_STAT_GOOD;
		task->task_scsi_status = GOOD;
	} else {
		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
		task->task_se_cmd->scsi_sense_reason =
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	}

	transport_complete_task(task, good);
}
EXPORT_SYMBOL(transport_complete_sync_cache);
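
/*
 * Illustrative sketch (hypothetical backend completion path, not FILEIO's
 * actual code): a subsystem plugin reporting SYNCHRONIZE_CACHE status:
 *
 *	static void my_backend_sync_done(struct se_cmd *cmd, int error)
 *	{
 *		transport_complete_sync_cache(cmd, error == 0);
 *	}
 */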

static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd);
}

/*	transport_complete_task():
 *
 *	Called from interrupt and non-interrupt context depending
 *	on the transport plugin.
 */
void transport_complete_task(struct se_task *task, int success)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	task->task_flags &= ~TF_ACTIVE;

	/*
	 * See if any sense data exists, if so set the TASK_SENSE flag.
	 * Also check for any other post completion work that needs to be
	 * done by the plugins.
	 */
	if (dev && dev->transport->transport_complete) {
		if (dev->transport->transport_complete(task) != 0) {
			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
			task->task_flags |= TF_HAS_SENSE;
			success = 1;
		}
	}

	/*
	 * See if we are waiting for outstanding struct se_task
	 * to complete for an exception condition
	 */
	if (task->task_flags & TF_REQUEST_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&task->task_stop_comp);
		return;
	}

	if (!success)
		cmd->transport_state |= CMD_T_FAILED;

	/*
	 * Decrement the outstanding t_task_cdbs_left count.  The last
	 * struct se_task from struct se_cmd will complete itself into the
	 * device queue depending upon int success.
	 */
	if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	/*
	 * Check for the case where an explicit ABORT_TASK has been received
	 * and transport_wait_for_tasks() will be waiting for completion.
	 */
	if (cmd->transport_state & CMD_T_ABORTED &&
	    cmd->transport_state & CMD_T_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->t_transport_stop_comp);
		return;
	} else if (cmd->transport_state & CMD_T_FAILED) {
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		INIT_WORK(&cmd->work, target_complete_failure_work);
	} else {
		INIT_WORK(&cmd->work, target_complete_ok_work);
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(transport_complete_task);

/*
 * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
 * struct se_task list is ready to be added to the active execution list
 * of the struct se_device.
 *
 * Called with se_dev_t->execute_task_lock held.
 */
static inline int transport_add_task_check_sam_attr(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	/*
	 * No SAM Task attribute emulation enabled, add to tail of
	 * execution queue
	 */
	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
		list_add_tail(&task->t_execute_list, &dev->execute_task_list);
		return 0;
	}
	/*
	 * HEAD_OF_QUEUE attribute for received CDB, which means
	 * the first task that is associated with a struct se_cmd goes to
	 * head of the struct se_device->execute_task_list, and task_prev
	 * after that for each subsequent task
	 */
	if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
		list_add(&task->t_execute_list,
				(task_prev != NULL) ?
				&task_prev->t_execute_list :
				&dev->execute_task_list);

		pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
				" in execution queue\n",
				task->task_se_cmd->t_task_cdb[0]);
		return 1;
	}
	/*
	 * ORDERED, SIMPLE or UNTAGGED attribute tasks are transitioned from
	 * Dormant -> Active state, and are added to the end of the
	 * struct se_device->execute_task_list
	 */
	list_add_tail(&task->t_execute_list, &dev->execute_task_list);
	return 0;
}
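
/*
 * Example: with SAM Task Attribute emulation enabled, a HEAD_OF_QUEUE
 * tagged command has its first task placed at the front of
 * execute_task_list and each later task chained behind the previous one,
 * keeping the command's tasks ordered ahead of queued SIMPLE tasks.
 */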

/*	__transport_add_task_to_execute_queue():
 *
 *	Called with se_dev_t->execute_task_lock held.
 */
static void __transport_add_task_to_execute_queue(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	int head_of_queue;

	head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
	atomic_inc(&dev->execute_tasks);

	if (task->t_state_active)
		return;
	/*
	 * Determine if this task needs to go to HEAD_OF_QUEUE for the
	 * state list as well.  Running with SAM Task Attribute emulation
	 * will always return head_of_queue == 0 here
	 */
	if (head_of_queue)
		list_add(&task->t_state_list, (task_prev) ?
				&task_prev->t_state_list :
				&dev->state_task_list);
	else
		list_add_tail(&task->t_state_list, &dev->state_task_list);

	task->t_state_active = true;

	pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
		task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
		task, dev);
}

static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_task *task;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		spin_lock(&dev->execute_task_lock);
		if (!task->t_state_active) {
			list_add_tail(&task->t_state_list,
				      &dev->state_task_list);
			task->t_state_active = true;

			pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
				task->task_se_cmd->se_tfo->get_task_tag(
				task->task_se_cmd), task, dev);
		}
		spin_unlock(&dev->execute_task_lock);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

static void __transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_task *task, *task_prev = NULL;

	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		if (!list_empty(&task->t_execute_list))
			continue;
		/*
		 * __transport_add_task_to_execute_queue() handles the
		 * SAM Task Attribute emulation if enabled
		 */
		__transport_add_task_to_execute_queue(task, task_prev, dev);
		task_prev = task;
	}
}

static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
	unsigned long flags;
	struct se_device *dev = cmd->se_dev;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	__transport_add_tasks_from_cmd(cmd);
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

void __transport_remove_task_from_execute_queue(struct se_task *task,
		struct se_device *dev)
{
	list_del_init(&task->t_execute_list);
	atomic_dec(&dev->execute_tasks);
}

static void transport_remove_task_from_execute_queue(
	struct se_task *task,
	struct se_device *dev)
{
	unsigned long flags;

	if (WARN_ON(list_empty(&task->t_execute_list)))
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	__transport_remove_task_from_execute_queue(task, dev);
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 */

static void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
					qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec(&dev->dev_qf_count);
		smp_mb__after_atomic_dec();

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			: "UNKNOWN");

		transport_add_cmd_to_queue(cmd, cmd->t_state, true);
	}
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	switch (dev->dev_status) {
	case TRANSPORT_DEVICE_ACTIVATED:
		*bl += sprintf(b + *bl, "ACTIVATED");
		break;
	case TRANSPORT_DEVICE_DEACTIVATED:
		*bl += sprintf(b + *bl, "DEACTIVATED");
		break;
	case TRANSPORT_DEVICE_SHUTDOWN:
		*bl += sprintf(b + *bl, "SHUTDOWN");
		break;
	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
		*bl += sprintf(b + *bl, "OFFLINE");
		break;
	default:
		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
		break;
	}

	*bl += sprintf(b + *bl, "  Execute/Max Queue Depth: %d/%d",
		atomic_read(&dev->execute_tasks), dev->queue_depth);
	*bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
		dev->se_sub_dev->se_dev_attrib.block_size,
		dev->se_sub_dev->se_dev_attrib.max_sectors);
	*bl += sprintf(b + *bl, "        ");
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access"
				" Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport"
				" Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set..
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	 if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
				hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
				hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);
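
/*
 * Worked example (illustrative): for a binary (code set 0x01) NAA
 * designator whose payload starts at page_83[4] with bytes 0x60 0x01 ...,
 * the encoding above first emits the identifier type nibble and then two
 * hex digits per payload byte, so vpd->device_identifier becomes the
 * ASCII string "36001...".
 */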

static void core_setup_task_attr_emulation(struct se_device *dev)
{
	/*
	 * If this device is from Target_Core_Mod/pSCSI, disable the
	 * SAM Task Attribute emulation.
	 *
	 * This is currently not available in upstream Linux/SCSI Target
	 * mode code, and is assumed to be disabled while using TCM/pSCSI.
	 */
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
		return;
	}

	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
	pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
		" device\n", dev->transport->name,
		dev->transport->get_device_rev(dev));
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
	pr_debug("                 ANSI SCSI revision: %02x\n",
				dev->transport->get_device_rev(dev));
}

struct se_device *transport_add_device_to_core_hba(
	struct se_hba *hba,
	struct se_subsystem_api *transport,
	struct se_subsystem_dev *se_dev,
	u32 device_flags,
	void *transport_dev,
	struct se_dev_limits *dev_limits,
	const char *inquiry_prod,
	const char *inquiry_rev)
{
	int force_pt;
	struct se_device  *dev;

	dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
	if (!dev) {
		pr_err("Unable to allocate memory for se_dev_t\n");
		return NULL;
	}

	transport_init_queue_obj(&dev->dev_queue_obj);
	dev->dev_flags		= device_flags;
	dev->dev_status		|= TRANSPORT_DEVICE_DEACTIVATED;
	dev->dev_ptr		= transport_dev;
	dev->se_hba		= hba;
	dev->se_sub_dev		= se_dev;
	dev->transport		= transport;
	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->execute_task_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_task_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->dev_status_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	atomic_set(&dev->dev_ordered_id, 0);

	se_dev_set_default_attribs(dev, dev_limits);

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();
	spin_lock_init(&dev->stats_lock);

	spin_lock(&hba->device_lock);
	list_add_tail(&dev->dev_list, &hba->hba_dev_list);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);
	/*
	 * Setup the SAM Task Attribute emulation for struct se_device
	 */
	core_setup_task_attr_emulation(dev);
	/*
	 * Force PR and ALUA passthrough emulation with internal object use.
	 */
	force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
	/*
	 * Setup the Reservations infrastructure for struct se_device
	 */
	core_setup_reservations(dev, force_pt);
	/*
	 * Setup the Asymmetric Logical Unit Assignment for struct se_device
	 */
	if (core_setup_alua(dev, force_pt) < 0)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->process_thread = kthread_run(transport_processing_thread, dev,
					  "LIO_%s", dev->transport->name);
	if (IS_ERR(dev->process_thread)) {
		pr_err("Unable to create kthread: LIO_%s\n",
			dev->transport->name);
		goto out;
	}
	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 * This is required so that transport_get_inquiry() copies these
	 * originals once back into DEV_T10_WWN(dev) for the virtual device
	 * setup.
	 */
	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (!inquiry_prod || !inquiry_rev) {
			pr_err("All non TCM/pSCSI plugins require"
				" INQUIRY consts\n");
			goto out;
		}

		strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
		strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
	}
	scsi_dump_inquiry(dev);

	return dev;
out:
	kthread_stop(dev->process_thread);

	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	se_release_vpd_for_dev(dev);

	kfree(dev);

	return NULL;
}
EXPORT_SYMBOL(transport_add_device_to_core_hba);

/*	transport_generic_prepare_cdb():
 *
 *	Since the Initiator sees iSCSI devices as LUNs,  the SCSI CDB will
 *	contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
 *	The point of this is since we are mapping iSCSI LUNs to
 *	SCSI Target IDs having a non-zero LUN in the CDB will throw the
 *	devices and HBAs for a loop.
 */
static inline void transport_generic_prepare_cdb(
	unsigned char *cdb)
{
	switch (cdb[0]) {
	case READ_10: /* SBC - RDProtect */
	case READ_12: /* SBC - RDProtect */
	case READ_16: /* SBC - RDProtect */
	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
	case VERIFY: /* SBC - VRProtect */
	case VERIFY_16: /* SBC - VRProtect */
	case WRITE_VERIFY: /* SBC - VRProtect */
	case WRITE_VERIFY_12: /* SBC - VRProtect */
		break;
	default:
		cdb[1] &= 0x1f; /* clear logical unit number */
		break;
	}
}

static struct se_task *
transport_generic_get_task(struct se_cmd *cmd,
		enum dma_data_direction data_direction)
{
	struct se_task *task;
	struct se_device *dev = cmd->se_dev;

	task = dev->transport->alloc_task(cmd->t_task_cdb);
	if (!task) {
		pr_err("Unable to allocate struct se_task\n");
		return NULL;
	}

	INIT_LIST_HEAD(&task->t_list);
	INIT_LIST_HEAD(&task->t_execute_list);
	INIT_LIST_HEAD(&task->t_state_list);
	init_completion(&task->task_stop_comp);
	task->task_se_cmd = cmd;
	task->task_data_direction = data_direction;

	return task;
}

static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);

/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
	struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer)
{
	INIT_LIST_HEAD(&cmd->se_lun_node);
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_queue_node);
	INIT_LIST_HEAD(&cmd->se_cmd_list);
	INIT_LIST_HEAD(&cmd->t_task_list);
	init_completion(&cmd->transport_lun_fe_stop_comp);
	init_completion(&cmd->transport_lun_stop_comp);
	init_completion(&cmd->t_transport_stop_comp);
	init_completion(&cmd->cmd_wait_comp);
	spin_lock_init(&cmd->t_state_lock);
	cmd->transport_state = CMD_T_DEV_ACTIVE;

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;
}
EXPORT_SYMBOL(transport_init_se_cmd);

static int transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 0;

	if (cmd->sam_task_attr == MSG_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA"
			" emulation is not supported\n");
		return -EINVAL;
	}
	/*
	 * Used to determine when ORDERED commands should go from
	 * Dormant to Active status.
	 */
	cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
	smp_mb__after_atomic_inc();
	pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
			cmd->se_ordered_id, cmd->sam_task_attr,
			cmd->se_dev->transport->name);
	return 0;
}

/*	target_setup_cmd_from_cdb():
 *
 *	Called from fabric RX Thread.
 */
int target_setup_cmd_from_cdb(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	int ret;

	transport_generic_prepare_cdb(cdb);
	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now..  Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
						GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb"
				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(cmd->__t_task_cdb));
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason =
					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
			return -ENOMEM;
		}
	} else
		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Copy the original CDB into cmd->t_task_cdb.
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
	/*
	 * Setup the received CDB based on SCSI defined opcodes and
	 * perform unit attention, persistent reservations and ALUA
	 * checks for virtual device backends.  The cmd->t_task_cdb
	 * pointer is expected to be setup before we reach this point.
	 */
	ret = transport_generic_cmd_sequencer(cmd, cdb);
	if (ret < 0)
		return ret;
	/*
	 * Check for SAM Task Attribute Emulation
	 */
	if (transport_check_alloc_task_attr(cmd) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}
	spin_lock(&cmd->se_lun->lun_sep_lock);
	if (cmd->se_lun->lun_sep)
		cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
	spin_unlock(&cmd->se_lun->lun_sep_lock);
	return 0;
}
EXPORT_SYMBOL(target_setup_cmd_from_cdb);

/*
 * Used by fabric module frontends to queue tasks directly.
 * May only be used from process context.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	int ret;

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}
	if (in_interrupt()) {
		dump_stack();
		pr_err("transport_generic_handle_cdb cannot be called"
				" from interrupt context\n");
		return -EINVAL;
	}
	/*
	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE following
	 * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
	 * in existing usage to ensure that outstanding descriptors are handled
	 * correctly during shutdown via transport_wait_for_tasks()
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	cmd->transport_state |= CMD_T_ACTIVE;

	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary..
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret < 0)
		transport_generic_request_failure(cmd);

	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);

/**
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 **/
void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{
	struct se_portal_group *se_tpg;
	int rc;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
	BUG_ON(in_interrupt());
	/*
	 * Initialize se_cmd for target operation.  From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
				data_length, data_dir, task_attr, sense);
	if (flags & TARGET_SCF_UNKNOWN_SIZE)
		se_cmd->unknown_data_length = 1;
	/*
	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
	 * se_sess->sess_cmd_list.  A second kref_get here is necessary
	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
	 */
	target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
	/*
	 * Signal bidirectional data payloads to target-core
	 */
	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;
	/*
	 * Locate se_lun pointer and attach it to struct se_cmd
	 */
	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
		transport_send_check_condition_and_sense(se_cmd,
				se_cmd->scsi_sense_reason, 0);
		target_put_sess_cmd(se_sess, se_cmd);
		return;
	}
	/*
	 * Sanitize CDBs via transport_generic_cmd_sequencer() and
	 * allocate the necessary tasks to complete the received CDB+data
	 */
	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
	if (rc != 0) {
		transport_generic_request_failure(se_cmd);
		return;
	}

	/*
	 * Check if we need to delay processing because of ALUA
	 * Active/NonOptimized primary access state..
	 */
	core_alua_check_nonop_delay(se_cmd);

	/*
	 * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
	 * for immediate execution of READs, otherwise wait for
	 * transport_generic_handle_data() to be called for WRITEs
	 * when fabric has filled the incoming buffer.
	 */
	transport_handle_cdb_direct(se_cmd);
	return;
}
EXPORT_SYMBOL(target_submit_cmd);
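
/*
 * Illustrative sketch (hypothetical fabric names): submitting a newly
 * received READ on behalf of an I_T nexus:
 *
 *	target_submit_cmd(&my_cmd->se_cmd, my_nexus->se_sess, cdb,
 *			  my_cmd->sense_buf, unpacked_lun, data_length,
 *			  MSG_SIMPLE_TAG, DMA_FROM_DEVICE, 0);
 *
 * From this point any exception status reaches the initiator through the
 * fabric's ->queue_status() callback rather than a return code.
 */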

static void target_complete_tmr_failure(struct work_struct *work)
{
	struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);

	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
	se_cmd->se_tfo->queue_tm_rsp(se_cmd);
	transport_generic_free_cmd(se_cmd, 0);
}

/**
 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
 *                     for TMR CDBs
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @fabric_context: fabric context for TMR req
 * @tm_type: Type of TM request
1773 1774
 * @gfp: gfp type for caller
 * @tag: referenced task tag for TMR_ABORT_TASK
1775
 * @flags: submit cmd flags
1776 1777 1778 1779
 *
 * Callable from all contexts.
 **/

int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u32 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t gfp, unsigned int tag, int flags)
{
	struct se_portal_group *se_tpg;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);

	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			      0, DMA_NONE, MSG_SIMPLE_TAG, sense);
	/*
	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
	 * allocation failure.
	 */
	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
	if (ret < 0)
		return -ENOMEM;

	if (tm_type == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = tag;

	/* See target_submit_cmd for commentary */
	target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));

	ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
	if (ret) {
		/*
		 * For callback during failure handling, push this work off
		 * to process context with TMR_LUN_DOES_NOT_EXIST status.
		 */
		INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
		schedule_work(&se_cmd->work);
		return 0;
	}
	transport_generic_handle_tmr(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_tmr);
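/*
 * Illustrative sketch only: a fabric module submitting an ABORT TASK TMR.
 * The my_fabric_* names below are hypothetical.
 */
#if 0
static int my_fabric_abort_task(struct my_fabric_tmr *tmr)
{
	/* The sense buffer is unused for TMRs but must still be provided */
	return target_submit_tmr(&tmr->se_cmd, tmr->sess->se_sess,
				 tmr->sense_buf, tmr->unpacked_lun,
				 tmr, TMR_ABORT_TASK, GFP_KERNEL,
				 tmr->ref_task_tag, TARGET_SCF_ACK_KREF);
}
#endif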

/*
 * Used by fabric module frontends defining a TFO->new_cmd_map() caller
 * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
 * complete setup in TCM process context w/ TFO->new_cmd_map().
 */
int transport_generic_handle_cdb_map(
	struct se_cmd *cmd)
{
	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}

	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_cdb_map);

/*	transport_generic_handle_data():
 *
 *
 */
int transport_generic_handle_data(
	struct se_cmd *cmd)
{
	/*
	 * For the software fabric case, we assume the nexus is being
	 * failed/shutdown when signals are pending from the kthread context
	 * caller, so we return a failure.  For the HW target mode case running
	 * in interrupt code, the signal_pending() check is skipped.
	 */
	if (!in_interrupt() && signal_pending(current))
		return -EPERM;
	/*
	 * If the received CDB has already been ABORTED by the generic
	 * target engine, we now call transport_check_aborted_status()
	 * to queue any delayed TASK_ABORTED status for the received CDB to the
	 * fabric module as we are expecting no further incoming DATA OUT
	 * sequences at this point.
	 */
	if (transport_check_aborted_status(cmd, 1) != 0)
		return 0;

	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_data);

/*	transport_generic_handle_tmr():
 *
 *
 */
int transport_generic_handle_tmr(
	struct se_cmd *cmd)
{
	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);

/*
 * If the task is active, request it to be stopped and sleep until it
 * has completed.
 */
bool target_stop_task(struct se_task *task, unsigned long *flags)
{
	struct se_cmd *cmd = task->task_se_cmd;
	bool was_active = false;

	if (task->task_flags & TF_ACTIVE) {
		task->task_flags |= TF_REQUEST_STOP;
		spin_unlock_irqrestore(&cmd->t_state_lock, *flags);

		pr_debug("Task %p waiting to complete\n", task);
		wait_for_completion(&task->task_stop_comp);
		pr_debug("Task %p stopped successfully\n", task);

		spin_lock_irqsave(&cmd->t_state_lock, *flags);
		atomic_dec(&cmd->t_task_cdbs_left);
		task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP);
		was_active = true;
	}

	return was_active;
}

static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
{
	struct se_task *task, *task_tmp;
	unsigned long flags;
	int ret = 0;

	pr_debug("ITT[0x%08x] - Stopping tasks\n",
		cmd->se_tfo->get_task_tag(cmd));

	/*
	 * No tasks remain in the execution queue
	 */
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_for_each_entry_safe(task, task_tmp,
				&cmd->t_task_list, t_list) {
		pr_debug("Processing task %p\n", task);
		/*
		 * If the struct se_task has not been sent and is not active,
		 * remove the struct se_task from the execution queue.
		 */
		if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) {
			spin_unlock_irqrestore(&cmd->t_state_lock,
					flags);
			transport_remove_task_from_execute_queue(task,
					cmd->se_dev);

			pr_debug("Task %p removed from execute queue\n", task);
			spin_lock_irqsave(&cmd->t_state_lock, flags);
			continue;
		}

		if (!target_stop_task(task, &flags)) {
			pr_debug("Task %p - did nothing\n", task);
			ret++;
		}
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return ret;
}

/*
 * Handle SAM-esque emulation for generic transport request failures.
 */
void transport_generic_request_failure(struct se_cmd *cmd)
{
	int ret = 0;

	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
		cmd->t_task_cdb[0]);
	pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
		cmd->se_tfo->get_cmd_state(cmd),
		cmd->t_state, cmd->scsi_sense_reason);
	pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
		" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
		" CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
		cmd->t_task_list_num,
		atomic_read(&cmd->t_task_cdbs_left),
		atomic_read(&cmd->t_task_cdbs_sent),
		atomic_read(&cmd->t_task_cdbs_ex_left),
		(cmd->transport_state & CMD_T_ACTIVE) != 0,
		(cmd->transport_state & CMD_T_STOP) != 0,
		(cmd->transport_state & CMD_T_SENT) != 0);

	/*
	 * For SAM Task Attribute emulation for failed struct se_cmd
	 */
	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);

	switch (cmd->scsi_sense_reason) {
	case TCM_NON_EXISTENT_LUN:
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_INVALID_CDB_FIELD:
	case TCM_INVALID_PARAMETER_LIST:
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	case TCM_UNKNOWN_MODE_PAGE:
	case TCM_WRITE_PROTECTED:
	case TCM_CHECK_CONDITION_ABORT_CMD:
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
	case TCM_CHECK_CONDITION_NOT_READY:
		break;
	case TCM_RESERVATION_CONFLICT:
		/*
		 * No SENSE Data payload for this case, set SCSI Status
		 * and queue the response to $FABRIC_MOD.
		 *
		 * Uses linux/include/scsi/scsi.h SAM status codes defs
		 */
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
		/*
		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
		 * CONFLICT STATUS.
		 *
		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
		 */
		if (cmd->se_sess &&
		    cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
			core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
				cmd->orig_fe_lun, 0x2C,
				ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);

		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		goto check_stop;
	default:
		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
			cmd->t_task_cdb[0], cmd->scsi_sense_reason);
		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	}
	/*
	 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
	 * make the call to transport_send_check_condition_and_sense()
	 * directly.  Otherwise expect the fabric to make the call to
	 * transport_send_check_condition_and_sense() after handling
	 * possible unsolicited write data payloads.
	 */
	ret = transport_send_check_condition_and_sense(cmd,
			cmd->scsi_sense_reason, 0);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;

check_stop:
	transport_lun_remove_cmd(cmd);
	if (!transport_cmd_check_stop_to_fabric(cmd))
		;
	return;

queue_full:
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}
EXPORT_SYMBOL(transport_generic_request_failure);

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}
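
/*
 * Illustrative sketch only: the big-endian byte math above, worked for a
 * READ_10 CDB carrying 12 34 56 78 in bytes 2..5, which transport_lba_32()
 * assembles into LBA 0x12345678.
 */
#if 0
static void lba_extract_example(void)
{
	unsigned char cdb[10] = { READ_10, 0, 0x12, 0x34, 0x56, 0x78, 0, 0, 8, 0 };

	WARN_ON(transport_lba_32(cdb) != 0x12345678);
}
#endif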

static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
	se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
}

/*
 * Called from Fabric Module context from transport_execute_tasks()
 *
 * The return of this function determines if the tasks from struct se_cmd
 * get added to the execution queue in transport_execute_tasks(),
 * or are added to the delayed or ordered lists here.
 */
static inline int transport_execute_task_attr(struct se_cmd *cmd)
{
	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 1;
	/*
	 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
	 * to allow the passed struct se_cmd list of tasks to be added to
	 * the front of the list.
	 */
	if (cmd->sam_task_attr == MSG_HEAD_TAG) {
		pr_debug("Added HEAD_OF_QUEUE for CDB:"
			" 0x%02x, se_ordered_id: %u\n",
			cmd->t_task_cdb[0],
			cmd->se_ordered_id);
		return 1;
	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
		atomic_inc(&cmd->se_dev->dev_ordered_sync);
		smp_mb__after_atomic_inc();

		pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
				" list, se_ordered_id: %u\n",
				cmd->t_task_cdb[0],
				cmd->se_ordered_id);
		/*
		 * Add ORDERED command to tail of execution queue if
		 * no other older commands exist that need to be
		 * completed first.
		 */
		if (!atomic_read(&cmd->se_dev->simple_cmds))
			return 1;
	} else {
		/*
		 * For SIMPLE and UNTAGGED Task Attribute commands
		 */
		atomic_inc(&cmd->se_dev->simple_cmds);
		smp_mb__after_atomic_inc();
	}
	/*
	 * Otherwise, if one or more outstanding ORDERED task attributes
	 * exist, add the dormant task(s) built for the passed struct se_cmd
	 * to the delayed cmd list instead of the execution queue for this
	 * struct se_device.
	 */
	if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
		/*
		 * Otherwise, add cmd w/ tasks to delayed cmd queue that
		 * will be drained upon completion of HEAD_OF_QUEUE task.
		 */
		spin_lock(&cmd->se_dev->delayed_cmd_lock);
		cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
		list_add_tail(&cmd->se_delayed_node,
				&cmd->se_dev->delayed_cmd_list);
		spin_unlock(&cmd->se_dev->delayed_cmd_lock);

		pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
			" delayed CMD list, se_ordered_id: %u\n",
			cmd->t_task_cdb[0], cmd->sam_task_attr,
			cmd->se_ordered_id);
		/*
		 * Return zero to let transport_execute_tasks() know
		 * not to add the delayed tasks to the execution list.
		 */
		return 0;
	}
	/*
	 * Otherwise, no ORDERED task attributes exist..
	 */
	return 1;
}

/*
 * Called from fabric module context in transport_generic_new_cmd() and
 * transport_generic_process_write()
 */
static int transport_execute_tasks(struct se_cmd *cmd)
{
	int add_tasks;
	struct se_device *se_dev = cmd->se_dev;
	/*
	 * Call transport_cmd_check_stop() to see if a fabric exception
	 * has occurred that prevents execution.
	 */
	if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
		/*
		 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
		 * attribute for the tasks of the received struct se_cmd CDB
		 */
		add_tasks = transport_execute_task_attr(cmd);
		if (!add_tasks)
			goto execute_tasks;
		/*
		 * __transport_execute_tasks() -> __transport_add_tasks_from_cmd()
		 * adds associated se_tasks while holding dev->execute_task_lock
		 * before I/O dispatch to avoid a double spinlock access.
		 */
		__transport_execute_tasks(se_dev, cmd);
		return 0;
	}

execute_tasks:
	__transport_execute_tasks(se_dev, NULL);
	return 0;
}

/*
 * Called to check the struct se_device tcq depth window, and once open,
 * pull struct se_task entries from struct se_device->execute_task_list
 * and dispatch them to the backend.
 *
 * Called from transport_processing_thread()
 */
static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
{
	int error;
	struct se_cmd *cmd = NULL;
	struct se_task *task = NULL;
	unsigned long flags;

check_depth:
	spin_lock_irq(&dev->execute_task_lock);
	if (new_cmd != NULL)
		__transport_add_tasks_from_cmd(new_cmd);

	if (list_empty(&dev->execute_task_list)) {
		spin_unlock_irq(&dev->execute_task_lock);
		return 0;
	}
	task = list_first_entry(&dev->execute_task_list,
				struct se_task, t_execute_list);
	__transport_remove_task_from_execute_queue(task, dev);
	spin_unlock_irq(&dev->execute_task_lock);

	cmd = task->task_se_cmd;
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	task->task_flags |= (TF_ACTIVE | TF_SENT);
	atomic_inc(&cmd->t_task_cdbs_sent);

	if (atomic_read(&cmd->t_task_cdbs_sent) ==
	    cmd->t_task_list_num)
		cmd->transport_state |= CMD_T_SENT;

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (cmd->execute_task)
		error = cmd->execute_task(task);
	else
		error = dev->transport->do_task(task);
	if (error != 0) {
		spin_lock_irqsave(&cmd->t_state_lock, flags);
		task->task_flags &= ~TF_ACTIVE;
		cmd->transport_state &= ~CMD_T_SENT;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		transport_stop_tasks_for_cmd(cmd);
		transport_generic_request_failure(cmd);
	}

	new_cmd = NULL;
	goto check_depth;
}

static inline u32 transport_get_sectors_6(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 8-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * Use 24-bit allocation length for TYPE_TAPE.
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];

	/*
	 * Everything else assume TYPE_DISK Sector CDB location.
	 * Use 8-bit sector value.  SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
type_disk:
	return cdb[4] ? : 256;
}
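
/*
 * Illustrative sketch only: the SBC-3 zero-means-256 rule above, i.e. a
 * READ_6/WRITE_6 TRANSFER LENGTH byte of 0 yields 256 logical blocks.
 */
#if 0
static void sectors_6_example(void)
{
	unsigned char cdb[6] = { READ_6, 0, 0, 0, 0, 0 };	/* TRANSFER LENGTH == 0 */
	struct se_cmd cmd = { .se_dev = NULL };			/* forces the type_disk path */
	int ret = 0;

	WARN_ON(transport_get_sectors_6(cdb, &cmd, &ret) != 256);
}
#endif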

static inline u32 transport_get_sectors_10(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 16-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * XXX_10 is not defined in SSC, throw an exception
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
		*ret = -EINVAL;
		return 0;
	}

	/*
	 * Everything else assume TYPE_DISK Sector CDB location.
	 * Use 16-bit sector value.
	 */
type_disk:
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * XXX_12 is not defined in SSC, throw an exception
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
		*ret = -EINVAL;
		return 0;
	}

	/*
	 * Everything else assume TYPE_DISK Sector CDB location.
	 * Use 32-bit sector value.
	 */
type_disk:
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * Use 24-bit allocation length for TYPE_TAPE.
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];

type_disk:
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
		    (cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
		    (cdb[30] << 8) + cdb[31];
}

static inline u32 transport_get_size(
	u32 sectors,
	unsigned char *cdb,
	struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
		if (cdb[1] & 1) { /* sectors */
			return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
		} else /* bytes */
			return sectors;
	}

	pr_debug("Returning block_size: %u, sectors: %u == %u for"
		" %s object\n", dev->se_sub_dev->se_dev_attrib.block_size,
		sectors, dev->se_sub_dev->se_dev_attrib.block_size * sectors,
		dev->transport->name);

	return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
}
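
/*
 * Illustrative worked example for the helper above: for a TYPE_DISK
 * backend with a 512-byte se_dev_attrib.block_size, an 8-sector READ_10
 * yields transport_get_size() == 8 * 512 == 4096 bytes of expected
 * payload; for TYPE_TAPE with cdb[1] bit 0 clear, the length is already
 * in bytes and is returned unscaled.
 */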

static void transport_xor_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	int i;
	int count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg
	 */

	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr)
			goto out;

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
}
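
/*
 * Illustrative sketch only: the XOR identity the callback above relies
 * on.  If D is the data-out payload and R the blocks read from media,
 * the data-in buffer ends up holding R ^ D, so the initiator can
 * recover R as (R ^ D) ^ D.
 */
#if 0
static void xdwriteread_xor_example(void)
{
	unsigned char r = 0xA5, d = 0x3C;
	unsigned char x = r ^ d;	/* what the data-in buffer carries */

	WARN_ON((x ^ d) != r);		/* initiator recovers the read data */
}
#endif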

/*
 * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
 */
static int transport_get_sense_data(struct se_cmd *cmd)
{
	unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
	struct se_device *dev = cmd->se_dev;
	struct se_task *task = NULL, *task_tmp;
	unsigned long flags;
	u32 offset = 0;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}

	list_for_each_entry_safe(task, task_tmp,
				&cmd->t_task_list, t_list) {
		if (!(task->task_flags & TF_HAS_SENSE))
			continue;

		if (!dev->transport->get_sense_buffer) {
			pr_err("dev->transport->get_sense_buffer"
					" is NULL\n");
			continue;
		}

		sense_buffer = dev->transport->get_sense_buffer(task);
		if (!sense_buffer) {
			pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate"
				" sense buffer for task with sense\n",
				cmd->se_tfo->get_task_tag(cmd), task);
			continue;
		}
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		offset = cmd->se_tfo->set_fabric_sense_len(cmd,
				TRANSPORT_SENSE_BUFFER);

		memcpy(&buffer[offset], sense_buffer,
				TRANSPORT_SENSE_BUFFER);
		cmd->scsi_status = task->task_scsi_status;
		/* Automatically padded */
		cmd->scsi_sense_length =
				(TRANSPORT_SENSE_BUFFER + offset);

		pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
				" and sense\n",
			dev->se_hba->hba_id, dev->transport->name,
				cmd->scsi_status);
		return 0;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return -1;
}

static inline long long transport_dev_end_lba(struct se_device *dev)
{
	return dev->transport->get_blocks(dev) + 1;
}

static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	u32 sectors;

	if (dev->transport->get_device_type(dev) != TYPE_DISK)
		return 0;

	sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);

	if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
		pr_err("LBA: %llu Sectors: %u exceeds"
			" transport_dev_end_lba(): %llu\n",
			cmd->t_task_lba, sectors,
			transport_dev_end_lba(dev));
		return -EINVAL;
	}

	return 0;
}

static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
{
	/*
	 * Determine if the received WRITE_SAME is used for direct
	 * passthrough into Linux/SCSI with struct request via TCM/pSCSI
	 * or we are signaling the use of internal WRITE_SAME + UNMAP=1
	 * emulation for Linux/BLOCK discard with TCM/IBLOCK code.
	 */
	int passthrough = (dev->transport->transport_type ==
				TRANSPORT_PLUGIN_PHBA_PDEV);

	if (!passthrough) {
		if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
			pr_err("WRITE_SAME PBDATA and LBDATA"
				" bits not supported for Block Discard"
				" Emulation\n");
			return -ENOSYS;
		}
		/*
		 * Currently for the emulated case we only accept
		 * tpws with the UNMAP=1 bit set.
		 */
		if (!(flags[0] & 0x08)) {
			pr_err("WRITE_SAME w/o UNMAP bit not"
				" supported for Block Discard Emulation\n");
			return -ENOSYS;
		}
	}

	return 0;
}
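
/*
 * Illustrative sketch only: the WRITE_SAME flags byte checked above.
 * Bit 3 (0x08) is UNMAP, bit 2 (0x04) is PBDATA, bit 1 (0x02) is LBDATA;
 * the emulated (non-passthrough) path accepts UNMAP=1 with PBDATA and
 * LBDATA both clear.
 */
#if 0
static bool write_same_flags_ok_for_emulation(unsigned char flags)
{
	/* UNMAP set, PBDATA and LBDATA clear */
	return (flags & 0x08) && !(flags & 0x04) && !(flags & 0x02);
}
#endif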

/*	transport_generic_cmd_sequencer():
 *
 *	Generic Command Sequencer that should work for most DAS transport
 *	drivers.
 *
 *	Called from target_setup_cmd_from_cdb() in the $FABRIC_MOD
 *	RX Thread.
 *
 *	FIXME: Need to support other SCSI OPCODEs as well.
 */
static int transport_generic_cmd_sequencer(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	struct se_device *dev = cmd->se_dev;
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	int ret = 0, sector_ret = 0, passthrough;
	u32 sectors = 0, size = 0, pr_reg_type = 0;
	u16 service_action;
	u8 alua_ascq = 0;
	/*
	 * Check for an existing UNIT ATTENTION condition
	 */
	if (core_scsi3_ua_check(cmd, cdb) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
		return -EINVAL;
	}
	/*
	 * Check status of Asymmetric Logical Unit Assignment port
	 */
	ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
	if (ret != 0) {
		/*
		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
		 * The ALUA additional sense code qualifier (ASCQ) is determined
		 * by the ALUA primary or secondary access state..
		 */
		if (ret > 0) {
			pr_debug("[%s]: ALUA TG Port not available,"
				" SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
				cmd->se_tfo->get_fabric_name(), alua_ascq);

			transport_set_sense_codes(cmd, 0x04, alua_ascq);
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
			return -EINVAL;
		}
		goto out_invalid_cdb_field;
	}
	/*
	 * Check status for SPC-3 Persistent Reservations
	 */
	if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
		if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
					cmd, cdb, pr_reg_type) != 0) {
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
			cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
			cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
			return -EBUSY;
		}
		/*
		 * This means the CDB is allowed for the SCSI Initiator port
		 * when said port is *NOT* holding the legacy SPC-2 or
		 * SPC-3 Persistent Reservation.
		 */
	}

	/*
	 * If we operate in passthrough mode we skip most CDB emulation and
	 * instead hand the commands down to the physical SCSI device.
	 */
	passthrough =
		(dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_64(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_10:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_64(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case XDWRITEREAD_10:
		if ((cmd->data_direction != DMA_TO_DEVICE) ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			goto out_invalid_cdb_field;
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;

		/*
		 * Do not allow BIDI commands for passthrough mode.
		 */
		if (passthrough)
			goto out_unsupported_cdb;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->transport_complete_callback = &transport_xor_callback;
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		break;
	case VARIABLE_LENGTH_CMD:
		service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
			if (sector_ret)
				goto out_unsupported_cdb;
			size = transport_get_size(sectors, cdb, cmd);
			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;

			/*
			 * Do not allow BIDI commands for passthrough mode.
			 */
			if (passthrough)
				goto out_unsupported_cdb;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->transport_complete_callback = &transport_xor_callback;
			if (cdb[1] & 0x8)
				cmd->se_cmd_flags |= SCF_FUA;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
			if (sector_ret)
				goto out_unsupported_cdb;

			if (sectors)
				size = transport_get_size(1, cdb, cmd);
			else {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				goto out_invalid_cdb_field;
			}

			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
			cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;

			if (target_check_write_same_discard(&cdb[10], dev) < 0)
				goto out_unsupported_cdb;
			if (!passthrough)
				cmd->execute_task = target_emulate_write_same;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
				" 0x%04x not supported\n", service_action);
			goto out_unsupported_cdb;
		}
		break;
	case MAINTENANCE_IN:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/* MAINTENANCE_IN from SCC-2 */
			/*
			 * Check for emulated MI_REPORT_TARGET_PGS.
			 */
			if (cdb[1] == MI_REPORT_TARGET_PGS &&
			    su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
				cmd->execute_task =
					target_emulate_report_target_port_groups;
			}
			size = (cdb[6] << 24) | (cdb[7] << 16) |
			       (cdb[8] << 8) | cdb[9];
		} else {
			/* GPCMD_SEND_KEY from multi media commands */
			size = (cdb[8] << 8) + cdb[9];
		}
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MODE_SELECT:
		size = cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MODE_SELECT_10:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MODE_SENSE:
		size = cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_modesense;
		break;
	case MODE_SENSE_10:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_modesense;
		break;
	case GPCMD_READ_BUFFER_CAPACITY:
	case GPCMD_SEND_OPC:
	case LOG_SELECT:
	case LOG_SENSE:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case READ_BLOCK_LIMITS:
		size = READ_BLOCK_LEN;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case GPCMD_GET_CONFIGURATION:
	case GPCMD_READ_FORMAT_CAPACITIES:
	case GPCMD_READ_DISC_INFO:
	case GPCMD_READ_TRACK_RZONE_INFO:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case PERSISTENT_RESERVE_IN:
		if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
			cmd->execute_task = target_scsi3_emulate_pr_in;
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case PERSISTENT_RESERVE_OUT:
		if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
			cmd->execute_task = target_scsi3_emulate_pr_out;
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case GPCMD_MECHANISM_STATUS:
	case GPCMD_READ_DVD_STRUCTURE:
		size = (cdb[8] << 8) + cdb[9];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case READ_POSITION:
		size = READ_POSITION_LEN;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MAINTENANCE_OUT:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/* MAINTENANCE_OUT from SCC-2
			 *
			 * Check for emulated MO_SET_TARGET_PGS.
			 */
			if (cdb[1] == MO_SET_TARGET_PGS &&
			    su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
				cmd->execute_task =
					target_emulate_set_target_port_groups;
			}

			size = (cdb[6] << 24) | (cdb[7] << 16) |
			       (cdb[8] << 8) | cdb[9];
		} else {
			/* GPCMD_REPORT_KEY from multi media commands */
			size = (cdb[8] << 8) + cdb[9];
		}
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case INQUIRY:
		size = (cdb[3] << 8) + cdb[4];
		/*
		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
		 * See spc4r17 section 5.3
		 */
		if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
			cmd->sam_task_attr = MSG_HEAD_TAG;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_inquiry;
		break;
	case READ_BUFFER:
		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_readcapacity;
		break;
	case READ_MEDIA_SERIAL_NUMBER:
	case SECURITY_PROTOCOL_IN:
	case SECURITY_PROTOCOL_OUT:
		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case SERVICE_ACTION_IN:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			if (!passthrough)
				cmd->execute_task =
					target_emulate_readcapacity_16;
			break;
		default:
			if (passthrough)
				break;

			pr_err("Unsupported SA: 0x%02x\n",
				cmd->t_task_cdb[1] & 0x1f);
			goto out_invalid_cdb_field;
		}
		/*FALLTHROUGH*/
	case ACCESS_CONTROL_IN:
	case ACCESS_CONTROL_OUT:
	case EXTENDED_COPY:
	case READ_ATTRIBUTE:
	case RECEIVE_COPY_RESULTS:
	case WRITE_ATTRIBUTE:
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case RECEIVE_DIAGNOSTIC:
	case SEND_DIAGNOSTIC:
		size = (cdb[3] << 8) | cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
#if 0
	case GPCMD_READ_CD:
		sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		size = (2336 * sectors);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
#endif
	case READ_TOC:
		size = cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case REQUEST_SENSE:
		size = cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_request_sense;
		break;
	case READ_ELEMENT_STATUS:
		size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case WRITE_BUFFER:
		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case RESERVE:
	case RESERVE_10:
		/*
		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
		 */
		if (cdb[0] == RESERVE_10)
			size = (cdb[7] << 8) | cdb[8];
		else
			size = cmd->data_length;

		/*
		 * Setup the legacy emulated handler for SPC-2 and
		 * >= SPC-3 compatible reservation handling (CRH=1)
		 * Otherwise, we assume the underlying SCSI logic is
		 * running in SPC_PASSTHROUGH, and wants reservations
		 * emulation disabled.
		 */
		if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
			cmd->execute_task = target_scsi2_reservation_reserve;
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case RELEASE:
	case RELEASE_10:
		/*
		 * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
		 */
		if (cdb[0] == RELEASE_10)
			size = (cdb[7] << 8) | cdb[8];
		else
			size = cmd->data_length;

		if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
			cmd->execute_task = target_scsi2_reservation_release;
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		/*
		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
		 */
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
			cmd->t_task_lba = transport_lba_64(cdb);
		}
		if (sector_ret)
			goto out_unsupported_cdb;

		size = transport_get_size(sectors, cdb, cmd);
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;

		if (passthrough)
			break;

		/*
		 * Check to ensure that LBA + Range does not exceed past end of
		 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
		 */
		if ((cmd->t_task_lba != 0) || (sectors != 0)) {
			if (transport_cmd_get_valid_sectors(cmd) < 0)
				goto out_invalid_cdb_field;
		}
		cmd->execute_task = target_emulate_synchronize_cache;
		break;
	case UNMAP:
		size = get_unaligned_be16(&cdb[7]);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;

		if (sectors)
			size = transport_get_size(1, cdb, cmd);
		else {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			goto out_invalid_cdb_field;
		}

		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;

		if (target_check_write_same_discard(&cdb[1], dev) < 0)
			goto out_unsupported_cdb;
		if (!passthrough)
			cmd->execute_task = target_emulate_write_same;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;

		if (sectors)
			size = transport_get_size(1, cdb, cmd);
		else {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			goto out_invalid_cdb_field;
		}

		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of original reserved field
		 */
		if (target_check_write_same_discard(&cdb[1], dev) < 0)
			goto out_unsupported_cdb;
		if (!passthrough)
			cmd->execute_task = target_emulate_write_same;
		break;
	case ALLOW_MEDIUM_REMOVAL:
	case ERASE:
	case REZERO_UNIT:
	case SEEK_10:
	case SPACE:
	case START_STOP:
	case TEST_UNIT_READY:
	case VERIFY:
	case WRITE_FILEMARKS:
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_noop;
		break;
	case GPCMD_CLOSE_TRACK:
	case INITIALIZE_ELEMENT_STATUS:
	case GPCMD_LOAD_UNLOAD:
	case GPCMD_SET_SPEED:
	case MOVE_MEDIUM:
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case REPORT_LUNS:
		cmd->execute_task = target_report_luns;
		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		/*
		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
		 * See spc4r17 section 5.3
		 */
		if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
			cmd->sam_task_attr = MSG_HEAD_TAG;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	default:
		pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
			" 0x%02x, sending CHECK_CONDITION.\n",
			cmd->se_tfo->get_fabric_name(), cdb[0]);
		goto out_unsupported_cdb;
	}

	if (cmd->unknown_data_length)
		cmd->data_length = size;

	if (size != cmd->data_length) {
		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
				cmd->data_length, size, cdb[0]);

		cmd->cmd_spdtl = size;

		if (cmd->data_direction == DMA_TO_DEVICE) {
			pr_err("Rejecting underflow/overflow"
					" WRITE data\n");
			goto out_invalid_cdb_field;
		}
		/*
		 * Reject READ_* or WRITE_* with overflow/underflow for
		 * type SCF_SCSI_DATA_SG_IO_CDB.
		 */
		if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) {
			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
				" CDB on non 512-byte sector setup subsystem"
				" plugin: %s\n", dev->transport->name);
			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
			goto out_invalid_cdb_field;
		}

		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
		}
		cmd->data_length = size;
	}

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB &&
	    (sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors ||
	     sectors > dev->se_sub_dev->se_dev_attrib.max_sectors)) {
		printk_ratelimited(KERN_ERR "SCSI OP %02xh with too big sectors %u\n",
				   cdb[0], sectors);
		goto out_invalid_cdb_field;
	}

	/* reject any command that we don't have a handler for */
	if (!(passthrough || cmd->execute_task ||
	     (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
		goto out_unsupported_cdb;

	transport_set_supported_SAM_opcode(cmd);
	return ret;

out_unsupported_cdb:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
	return -EINVAL;
out_invalid_cdb_field:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
	return -EINVAL;
}
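
/*
 * Illustrative worked example for the residual bookkeeping above: if the
 * CDB encodes 4096 bytes but the fabric expected 8192, the command is
 * marked SCF_UNDERFLOW_BIT with residual_count = 8192 - 4096 = 4096, and
 * data_length is trimmed to the CDB-derived 4096 bytes.
 */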

/*
 * Called from I/O completion to determine which dormant/delayed
 * and ordered cmds need to have their tasks added to the execution queue.
 */
static void transport_complete_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_cmd *cmd_p, *cmd_tmp;
	int new_active_tasks = 0;

	if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
		atomic_dec(&dev->simple_cmds);
		smp_mb__after_atomic_dec();
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
			cmd->se_ordered_id);
	} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for"
			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
			cmd->se_ordered_id);
	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
		atomic_dec(&dev->dev_ordered_sync);
		smp_mb__after_atomic_dec();

		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
			" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
	}
	/*
	 * Process all commands up to the last received
	 * ORDERED task attribute which requires another blocking
	 * boundary
	 */
	spin_lock(&dev->delayed_cmd_lock);
	list_for_each_entry_safe(cmd_p, cmd_tmp,
			&dev->delayed_cmd_list, se_delayed_node) {

		list_del(&cmd_p->se_delayed_node);
		spin_unlock(&dev->delayed_cmd_lock);

		pr_debug("Calling add_tasks() for"
			" cmd_p: 0x%02x Task Attr: 0x%02x"
			" Dormant -> Active, se_ordered_id: %u\n",
			cmd_p->t_task_cdb[0],
			cmd_p->sam_task_attr, cmd_p->se_ordered_id);

		transport_add_tasks_from_cmd(cmd_p);
		new_active_tasks++;

		spin_lock(&dev->delayed_cmd_lock);
		if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
			break;
	}
	spin_unlock(&dev->delayed_cmd_lock);
	/*
	 * If new tasks have become active, wake up the transport thread
	 * to do the processing of the Active tasks.
	 */
	if (new_active_tasks != 0)
		wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
}

static void transport_complete_qf(struct se_cmd *cmd)
{
	int ret = 0;

	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);

	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret)
			goto out;
	}

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		ret = cmd->se_tfo->queue_data_in(cmd);
		break;
	case DMA_TO_DEVICE:
		if (cmd->t_bidi_data_sg) {
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret < 0)
				break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		ret = cmd->se_tfo->queue_status(cmd);
		break;
	default:
		break;
	}

out:
	if (ret < 0) {
		transport_handle_queue_full(cmd, cmd->se_dev);
		return;
	}
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
}

static void transport_handle_queue_full(
	struct se_cmd *cmd,
	struct se_device *dev)
{
	spin_lock_irq(&dev->qf_cmd_lock);
	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
	atomic_inc(&dev->dev_qf_count);
	smp_mb__after_atomic_inc();
	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);

	schedule_work(&cmd->se_dev->qf_work_queue);
}

static void target_complete_ok_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	int reason = 0, ret;

	/*
	 * Check if we need to move delayed/dormant tasks from cmds on the
	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
	 * Attribute.
	 */
	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);
	/*
	 * Check to schedule QUEUE_FULL work, or execute an existing
	 * cmd->transport_qf_callback()
	 */
	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
		schedule_work(&cmd->se_dev->qf_work_queue);

	/*
	 * Check if we need to retrieve a sense buffer from
	 * the struct se_cmd in question.
	 */
	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		if (transport_get_sense_data(cmd) < 0)
			reason = TCM_NON_EXISTENT_LUN;

		/*
		 * Only set when a struct se_task->task_scsi_status returned
		 * a non GOOD status.
		 */
		if (cmd->scsi_status) {
			ret = transport_send_check_condition_and_sense(
					cmd, reason, 1);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop_to_fabric(cmd);
			return;
		}
	}
	/*
	 * Check for a callback, used by, amongst other things,
	 * XDWRITE_READ_10 emulation.
	 */
	if (cmd->transport_complete_callback)
		cmd->transport_complete_callback(cmd);

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		spin_lock(&cmd->se_lun->lun_sep_lock);
		if (cmd->se_lun->lun_sep) {
			cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
					cmd->data_length;
		}
		spin_unlock(&cmd->se_lun->lun_sep_lock);

		ret = cmd->se_tfo->queue_data_in(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	case DMA_TO_DEVICE:
		spin_lock(&cmd->se_lun->lun_sep_lock);
		if (cmd->se_lun->lun_sep) {
			cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
				cmd->data_length;
		}
		spin_unlock(&cmd->se_lun->lun_sep_lock);
		/*
		 * Check if we need to send READ payload for BIDI-COMMAND
		 */
		if (cmd->t_bidi_data_sg) {
			spin_lock(&cmd->se_lun->lun_sep_lock);
			if (cmd->se_lun->lun_sep) {
				cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
					cmd->data_length;
			}
			spin_unlock(&cmd->se_lun->lun_sep_lock);
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;
			break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	default:
		break;
	}

	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
		" data_direction: %d\n", cmd, cmd->data_direction);
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}

static void transport_free_dev_tasks(struct se_cmd *cmd)
{
	struct se_task *task, *task_tmp;
	unsigned long flags;
	LIST_HEAD(dispose_list);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_for_each_entry_safe(task, task_tmp,
				&cmd->t_task_list, t_list) {
		if (!(task->task_flags & TF_ACTIVE))
			list_move_tail(&task->t_list, &dispose_list);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	while (!list_empty(&dispose_list)) {
		task = list_first_entry(&dispose_list, struct se_task, t_list);

		list_del(&task->t_list);
		cmd->se_dev->transport->free_task(task);
	}
}

static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, nents, count)
		__free_page(sg_page(sg));

	kfree(sgl);
}

static inline void transport_free_pages(struct se_cmd *cmd)
{
	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
		return;

	transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
	cmd->t_data_sg = NULL;
	cmd->t_data_nents = 0;

	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
	cmd->t_bidi_data_sg = NULL;
	cmd->t_bidi_data_nents = 0;
}

/**
 * transport_release_cmd - free a command
 * @cmd:       command to free
 *
 * This routine unconditionally frees a command, and reference counting
 * or list removal must be done in the caller.
 */
static void transport_release_cmd(struct se_cmd *cmd)
{
	BUG_ON(!cmd->se_tfo);

	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
		core_tmr_release_req(cmd->se_tmr_req);
	if (cmd->t_task_cdb != cmd->__t_task_cdb)
		kfree(cmd->t_task_cdb);
	/*
	 * If this cmd has been setup with target_get_sess_cmd(), drop
	 * the kref and call ->release_cmd() in kref callback.
	 */
	if (cmd->check_release != 0) {
		target_put_sess_cmd(cmd->se_sess, cmd);
		return;
	}
	cmd->se_tfo->release_cmd(cmd);
}

/**
 * transport_put_cmd - release a reference to a command
 * @cmd:       command to release
 *
 * This routine releases our reference to the command and frees it if possible.
 */
static void transport_put_cmd(struct se_cmd *cmd)
{
	unsigned long flags;
	int free_tasks = 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (atomic_read(&cmd->t_fe_count)) {
		if (!atomic_dec_and_test(&cmd->t_fe_count))
			goto out_busy;
	}

	if (atomic_read(&cmd->t_se_count)) {
		if (!atomic_dec_and_test(&cmd->t_se_count))
			goto out_busy;
	}

	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		transport_all_task_dev_remove_state(cmd);
		free_tasks = 1;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (free_tasks != 0)
		transport_free_dev_tasks(cmd);

	transport_free_pages(cmd);
	transport_release_cmd(cmd);
	return;
out_busy:
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

/*
 * transport_generic_map_mem_to_cmd - Use fabric-allocated pages instead of
 * allocating in the core.
 * @cmd:  Associated se_cmd descriptor
 * @sgl:  SGL style memory for TCM WRITE / READ
 * @sgl_count: Number of SGL elements
 * @sgl_bidi: SGL style memory for TCM BIDI READ
 * @sgl_bidi_count: Number of BIDI READ SGL elements
 *
 * Return: nonzero if the cmd was rejected due to -ENOMEM or improper
 * usage of parameters.
 */
int transport_generic_map_mem_to_cmd(
	struct se_cmd *cmd,
	struct scatterlist *sgl,
	u32 sgl_count,
	struct scatterlist *sgl_bidi,
	u32 sgl_bidi_count)
{
	if (!sgl || !sgl_count)
		return 0;

	if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
	    (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
		/*
		 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
		 * scatterlists already have been set to follow what the fabric
		 * passes for the original expected data transfer length.
		 */
		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
			pr_warn("Rejecting SCSI DATA overflow for fabric using"
				" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
			return -EINVAL;
		}

		cmd->t_data_sg = sgl;
		cmd->t_data_nents = sgl_count;

		if (sgl_bidi && sgl_bidi_count) {
			cmd->t_bidi_data_sg = sgl_bidi;
			cmd->t_bidi_data_nents = sgl_bidi_count;
		}
		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	}

	return 0;
}
EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
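
/*
 * Editor's sketch (kept out of the build with #if 0): how a fabric module
 * that already owns DMA-able scatterlists might hand them to the core before
 * dispatch, the way tcm_loop reuses the SCSI midlayer's SGL.  The
 * my_fabric_* name is hypothetical.
 */
#if 0
static int my_fabric_submit(struct se_cmd *se_cmd, struct scatterlist *sgl,
			    u32 sgl_count)
{
	int ret;

	/* Sets SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC on success */
	ret = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count, NULL, 0);
	if (ret < 0)
		return ret;
	/* transport_generic_new_cmd() will now skip its own page allocation */
	return transport_generic_new_cmd(se_cmd);
}
#endif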

void *transport_kmap_data_sg(struct se_cmd *cmd)
{
	struct scatterlist *sg = cmd->t_data_sg;
	struct page **pages;
	int i;

	BUG_ON(!sg);
	/*
	 * We need to take into account a possible offset here for fabrics like
	 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
	 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
	 */
	if (!cmd->t_data_nents)
		return NULL;
	else if (cmd->t_data_nents == 1)
		return kmap(sg_page(sg)) + sg->offset;

	/* >1 page. use vmap */
	pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
	if (!pages)
		return NULL;

	/* convert sg[] to pages[] */
	for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
		pages[i] = sg_page(sg);
	}

	cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
	kfree(pages);
	if (!cmd->t_data_vmap)
		return NULL;

	return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
}
EXPORT_SYMBOL(transport_kmap_data_sg);

void transport_kunmap_data_sg(struct se_cmd *cmd)
{
	if (!cmd->t_data_nents) {
		return;
	} else if (cmd->t_data_nents == 1) {
		kunmap(sg_page(cmd->t_data_sg));
		return;
	}

	vunmap(cmd->t_data_vmap);
	cmd->t_data_vmap = NULL;
}
EXPORT_SYMBOL(transport_kunmap_data_sg);
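
/*
 * Editor's sketch (#if 0, hypothetical my_fabric_* name): kmap/kunmap must
 * be paired, and the mapping gives a virtually contiguous view of t_data_sg,
 * whether that is a single kmap'd page or a vmap of several.
 */
#if 0
static void my_fabric_dump_payload(struct se_cmd *cmd)
{
	unsigned char *buf = transport_kmap_data_sg(cmd);

	if (!buf)
		return;
	print_hex_dump(KERN_DEBUG, "se_cmd payload: ", DUMP_PREFIX_OFFSET,
		       16, 1, buf, min_t(u32, cmd->data_length, 64), false);
	transport_kunmap_data_sg(cmd);
}
#endif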

static int
transport_generic_get_mem(struct se_cmd *cmd)
{
	u32 length = cmd->data_length;
	unsigned int nents;
	struct page *page;
	gfp_t zero_flag;
	int i = 0;

	nents = DIV_ROUND_UP(length, PAGE_SIZE);
	cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
	if (!cmd->t_data_sg)
		return -ENOMEM;

	cmd->t_data_nents = nents;
	sg_init_table(cmd->t_data_sg, nents);

	zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB ? 0 : __GFP_ZERO;

	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);
		page = alloc_page(GFP_KERNEL | zero_flag);
		if (!page)
			goto out;

		sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}
	return 0;

out:
	/* Only unwind the entries that were actually populated */
	while (i > 0) {
		i--;
		__free_page(sg_page(&cmd->t_data_sg[i]));
	}
	kfree(cmd->t_data_sg);
	cmd->t_data_sg = NULL;
	return -ENOMEM;
}

/* Reduce sectors if they are too long for the device */
static inline sector_t transport_limit_task_sectors(
	struct se_device *dev,
	unsigned long long lba,
	sector_t sectors)
{
	sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);

	if (dev->transport->get_device_type(dev) == TYPE_DISK)
		if ((lba + sectors) > transport_dev_end_lba(dev))
			sectors = ((transport_dev_end_lba(dev) - lba) + 1);

	return sectors;
}

/*
 * Break up cmd into chunks transport can handle
 */
static int
transport_allocate_data_tasks(struct se_cmd *cmd,
	enum dma_data_direction data_direction,
	struct scatterlist *cmd_sg, unsigned int sgl_nents)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dev_attrib *attr = &dev->se_sub_dev->se_dev_attrib;
	sector_t sectors;
	struct se_task *task;
	unsigned long flags;

	if (transport_cmd_get_valid_sectors(cmd) < 0)
		return -EINVAL;

	sectors = DIV_ROUND_UP(cmd->data_length, attr->block_size);

	BUG_ON(cmd->data_length % attr->block_size);
	BUG_ON(sectors > attr->max_sectors);

	task = transport_generic_get_task(cmd, data_direction);
	if (!task)
		return -ENOMEM;

	task->task_sg = cmd_sg;
	task->task_sg_nents = sgl_nents;
	task->task_size = cmd->data_length;

	task->task_lba = cmd->t_task_lba;
	task->task_sectors = sectors;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_add_tail(&task->t_list, &cmd->t_task_list);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return 1;
}

static int
transport_allocate_control_task(struct se_cmd *cmd)
{
	struct se_task *task;
	unsigned long flags;

	/* Workaround for handling zero-length control CDBs */
	if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
	    !cmd->data_length)
		return 0;

	task = transport_generic_get_task(cmd, cmd->data_direction);
	if (!task)
		return -ENOMEM;

	task->task_sg = cmd->t_data_sg;
	task->task_size = cmd->data_length;
	task->task_sg_nents = cmd->t_data_nents;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_add_tail(&task->t_list, &cmd->t_task_list);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/* Success! Return number of tasks allocated */
	return 1;
}

/*
 * Allocate any required resources to execute the command.  For writes we
 * might not have the payload yet, so notify the fabric via a call to
 * ->write_pending instead. Otherwise place it on the execution queue.
 */
int transport_generic_new_cmd(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	int task_cdbs, task_cdbs_bidi = 0;
	int set_counts = 1;
	int ret = 0;

	/*
	 * Determine if the TCM fabric module has already allocated physical
	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
	 * beforehand.
	 */
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
	    cmd->data_length) {
		ret = transport_generic_get_mem(cmd);
		if (ret < 0)
			goto out_fail;
	}

	/*
	 * For BIDI commands, set up the read tasks first.
	 */
	if (cmd->t_bidi_data_sg &&
	    dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB));

		task_cdbs_bidi = transport_allocate_data_tasks(cmd,
				DMA_FROM_DEVICE, cmd->t_bidi_data_sg,
				cmd->t_bidi_data_nents);
		if (task_cdbs_bidi <= 0)
			goto out_fail;

		atomic_inc(&cmd->t_fe_count);
		atomic_inc(&cmd->t_se_count);
		set_counts = 0;
	}

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
		task_cdbs = transport_allocate_data_tasks(cmd,
					cmd->data_direction, cmd->t_data_sg,
					cmd->t_data_nents);
	} else {
		task_cdbs = transport_allocate_control_task(cmd);
	}

	if (task_cdbs < 0)
		goto out_fail;
	else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
		spin_lock_irq(&cmd->t_state_lock);
		cmd->t_state = TRANSPORT_COMPLETE;
		cmd->transport_state |= CMD_T_ACTIVE;
		spin_unlock_irq(&cmd->t_state_lock);

		if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
			u8 ua_asc = 0, ua_ascq = 0;

			core_scsi3_ua_clear_for_request_sense(cmd,
					&ua_asc, &ua_ascq);
		}

		INIT_WORK(&cmd->work, target_complete_ok_work);
		queue_work(target_completion_wq, &cmd->work);
		return 0;
	}

	if (set_counts) {
		atomic_inc(&cmd->t_fe_count);
		atomic_inc(&cmd->t_se_count);
	}

	cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi);
	atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num);
	atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num);

	/*
	 * For WRITEs, let the fabric know its buffer is ready.
	 * This WRITE struct se_cmd (and all of its associated struct se_task's)
	 * will be added to the struct se_device execution queue after its WRITE
	 * data has arrived. (ie: It gets handled by the transport processing
	 * thread a second time)
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		transport_add_tasks_to_state_queue(cmd);
		return transport_generic_write_pending(cmd);
	}
	/*
	 * Everything else but a WRITE, add the struct se_cmd's struct se_task's
	 * to the execution queue.
	 */
	transport_execute_tasks(cmd);
	return 0;

out_fail:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	return -EINVAL;
}
EXPORT_SYMBOL(transport_generic_new_cmd);
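
/*
 * Editor's sketch (#if 0, hypothetical my_fabric_* name): typical dispatch
 * from a fabric module.  For reads the command goes straight to the
 * execution queue; for writes transport_generic_new_cmd() returns after
 * calling ->write_pending(), and the fabric pushes the command again via
 * transport_generic_process_write() once the data has arrived.
 */
#if 0
static void my_fabric_dispatch(struct se_cmd *se_cmd)
{
	int ret = transport_generic_new_cmd(se_cmd);

	if (ret < 0)
		transport_generic_request_failure(se_cmd);
}
#endif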

/*	transport_generic_process_write():
 *
 *
 */
void transport_generic_process_write(struct se_cmd *cmd)
{
	transport_execute_tasks(cmd);
}
EXPORT_SYMBOL(transport_generic_process_write);

static void transport_write_pending_qf(struct se_cmd *cmd)
{
	int ret;

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM) {
		pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
			 cmd);
		transport_handle_queue_full(cmd, cmd->se_dev);
	}
}

static int transport_generic_write_pending(struct se_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->t_state = TRANSPORT_WRITE_PENDING;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * Clear the se_cmd for WRITE_PENDING status in order to set
	 * CMD_T_ACTIVE so that transport_generic_handle_data can be called
	 * from HW target mode interrupt code.  This is safe to be called
	 * with transport_off=1 before the cmd->se_tfo->write_pending
	 * because the se_cmd->se_lun pointer is not being cleared.
	 */
	transport_cmd_check_stop(cmd, 1, 0);

	/*
	 * Call the fabric write_pending function here to let the
	 * frontend know that WRITE buffers are ready.
	 */
	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;
	else if (ret < 0)
		return ret;

	return 1;

queue_full:
	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
	transport_handle_queue_full(cmd, cmd->se_dev);
	return 0;
}

void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
			transport_wait_for_tasks(cmd);

		transport_release_cmd(cmd);
	} else {
		if (wait_for_tasks)
			transport_wait_for_tasks(cmd);

		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);

		if (cmd->se_lun)
			transport_lun_remove_cmd(cmd);

		transport_free_dev_tasks(cmd);

		transport_put_cmd(cmd);
	}
}
EXPORT_SYMBOL(transport_generic_free_cmd);

/* target_get_sess_cmd - Add command to active ->sess_cmd_list
 * @se_sess:	session to reference
 * @se_cmd:	command descriptor to add
 * @ack_kref:	Signal that fabric will perform an ack target_put_sess_cmd()
 */
void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
			bool ack_kref)
{
	unsigned long flags;

	kref_init(&se_cmd->cmd_kref);
	/*
	 * Add a second kref if the fabric caller is expecting to handle
	 * fabric acknowledgement that requires two target_put_sess_cmd()
	 * invocations before se_cmd descriptor release.
	 */
	if (ack_kref) {
		kref_get(&se_cmd->cmd_kref);
		se_cmd->se_cmd_flags |= SCF_ACK_KREF;
	}

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
	se_cmd->check_release = 1;
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_get_sess_cmd);

static void target_release_cmd_kref(struct kref *kref)
{
	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
	struct se_session *se_sess = se_cmd->se_sess;
	unsigned long flags;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	if (list_empty(&se_cmd->se_cmd_list)) {
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		se_cmd->se_tfo->release_cmd(se_cmd);
		return;
	}
	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		complete(&se_cmd->cmd_wait_comp);
		return;
	}
	list_del(&se_cmd->se_cmd_list);
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

	se_cmd->se_tfo->release_cmd(se_cmd);
}

/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
 * @se_sess:	session to reference
 * @se_cmd:	command descriptor to drop
 */
int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
{
	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
}
EXPORT_SYMBOL(target_put_sess_cmd);
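
/*
 * Editor's sketch (#if 0): the intended get/put pairing.  With ack_kref=true
 * the descriptor holds two references, so it survives until both the local
 * completion and the initiator's acknowledgement have been processed.  The
 * my_fabric_* names are hypothetical.
 */
#if 0
static void my_fabric_start_io(struct se_session *se_sess, struct se_cmd *se_cmd)
{
	target_get_sess_cmd(se_sess, se_cmd, true);	/* kref == 2 */
}

static void my_fabric_io_done(struct se_cmd *se_cmd)
{
	target_put_sess_cmd(se_cmd->se_sess, se_cmd);	/* kref 2 -> 1 */
}

static void my_fabric_initiator_acked(struct se_cmd *se_cmd)
{
	target_put_sess_cmd(se_cmd->se_sess, se_cmd);	/* final put */
}
#endif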

/* target_splice_sess_cmd_list - Split active cmds into sess_wait_list
 * @se_sess:	session to split
 */
void target_splice_sess_cmd_list(struct se_session *se_sess)
{
	struct se_cmd *se_cmd;
	unsigned long flags;

	WARN_ON(!list_empty(&se_sess->sess_wait_list));
	INIT_LIST_HEAD(&se_sess->sess_wait_list);

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	se_sess->sess_tearing_down = 1;

	list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);

	list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
		se_cmd->cmd_wait_set = 1;

	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_splice_sess_cmd_list);

/* target_wait_for_sess_cmds - Wait for outstanding descriptors
 * @se_sess:    session to wait for active I/O
 * @wait_for_tasks:	Make extra transport_wait_for_tasks call
 */
void target_wait_for_sess_cmds(
	struct se_session *se_sess,
	int wait_for_tasks)
{
	struct se_cmd *se_cmd, *tmp_cmd;
	bool rc = false;

	list_for_each_entry_safe(se_cmd, tmp_cmd,
				&se_sess->sess_wait_list, se_cmd_list) {
		list_del(&se_cmd->se_cmd_list);

		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
			" %d\n", se_cmd, se_cmd->t_state,
			se_cmd->se_tfo->get_cmd_state(se_cmd));

		if (wait_for_tasks) {
			pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));

			rc = transport_wait_for_tasks(se_cmd);

			pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));
		}

		if (!rc) {
			wait_for_completion(&se_cmd->cmd_wait_comp);
			pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));
		}

		se_cmd->se_tfo->release_cmd(se_cmd);
	}
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);
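
/*
 * Editor's sketch (#if 0, hypothetical my_fabric_* name): the expected
 * teardown order.  Splice first so no new descriptors join the wait list,
 * finish or abort in-flight fabric I/O, then block until every outstanding
 * se_cmd has been released.
 */
#if 0
static void my_fabric_close_session(struct se_session *se_sess)
{
	target_splice_sess_cmd_list(se_sess);
	/* ...abort or complete in-flight fabric I/O here... */
	target_wait_for_sess_cmds(se_sess, 0);
	transport_deregister_session(se_sess);
}
#endif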

/*	transport_lun_wait_for_tasks():
 *
 *	Called from ConfigFS context to stop the passed struct se_cmd to allow
 *	an struct se_lun to be successfully shutdown.
 */
static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
{
	unsigned long flags;
	int ret;
	/*
	 * If the frontend has already requested this struct se_cmd to
	 * be stopped, we can safely ignore this struct se_cmd.
	 */
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_STOP) {
		cmd->transport_state &= ~CMD_T_LUN_STOP;

		pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
			 cmd->se_tfo->get_task_tag(cmd));
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		transport_cmd_check_stop(cmd, 1, 0);
		return -EPERM;
	}
	cmd->transport_state |= CMD_T_LUN_FE_STOP;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);

	ret = transport_stop_tasks_for_cmd(cmd);

	pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
			" %d\n", cmd, cmd->t_task_list_num, ret);
	if (!ret) {
		pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
				cmd->se_tfo->get_task_tag(cmd));
		wait_for_completion(&cmd->transport_lun_stop_comp);
		pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
				cmd->se_tfo->get_task_tag(cmd));
	}
	transport_remove_cmd_from_queue(cmd);

	return 0;
}

static void __transport_clear_lun_from_sessions(struct se_lun *lun)
{
	struct se_cmd *cmd = NULL;
	unsigned long lun_flags, cmd_flags;
	/*
	 * Do exception processing and return CHECK_CONDITION status to the
	 * Initiator Port.
	 */
	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	while (!list_empty(&lun->lun_cmd_list)) {
		cmd = list_first_entry(&lun->lun_cmd_list,
		       struct se_cmd, se_lun_node);
		list_del_init(&cmd->se_lun_node);

		/*
		 * This will notify iscsi_target_transport.c:
		 * transport_cmd_check_stop() that a LUN shutdown is in
		 * progress for the iscsi_cmd_t.
		 */
		spin_lock(&cmd->t_state_lock);
		pr_debug("SE_LUN[%d] - Setting cmd->transport"
			"_lun_stop for  ITT: 0x%08x\n",
			cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));
		cmd->transport_state |= CMD_T_LUN_STOP;
		spin_unlock(&cmd->t_state_lock);

		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);

		if (!cmd->se_lun) {
			pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
				cmd->se_tfo->get_task_tag(cmd),
				cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
			BUG();
		}
		/*
		 * If the Storage engine still owns the iscsi_cmd_t, determine
		 * and/or stop its context.
		 */
		pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
			"_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));

		if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}

		pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
			"_wait_for_tasks(): SUCCESS\n",
			cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));

		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
		if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
			spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
			goto check_cond;
		}
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		transport_all_task_dev_remove_state(cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);

		transport_free_dev_tasks(cmd);
		/*
		 * The Storage engine stopped this struct se_cmd before it was
		 * sent to the fabric frontend for delivery back to the
		 * Initiator Node.  Return this SCSI CDB back with a
		 * CHECK_CONDITION status.
		 */
check_cond:
		transport_send_check_condition_and_sense(cmd,
				TCM_NON_EXISTENT_LUN, 0);
		/*
		 * If the fabric frontend is waiting for this iscsi_cmd_t to
		 * be released, notify the waiting thread now that LU has
		 * finished accessing it.
		 */
		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
		if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
			pr_debug("SE_LUN[%d] - Detected FE stop for"
				" struct se_cmd: %p ITT: 0x%08x\n",
				lun->unpacked_lun,
				cmd, cmd->se_tfo->get_task_tag(cmd));

			spin_unlock_irqrestore(&cmd->t_state_lock,
					cmd_flags);
			transport_cmd_check_stop(cmd, 1, 0);
			complete(&cmd->transport_lun_fe_stop_comp);
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}
		pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
			lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));

		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	}
	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
}

static int transport_clear_lun_thread(void *p)
{
	struct se_lun *lun = p;

	__transport_clear_lun_from_sessions(lun);
	complete(&lun->lun_shutdown_comp);

	return 0;
}

int transport_clear_lun_from_sessions(struct se_lun *lun)
{
	struct task_struct *kt;

	kt = kthread_run(transport_clear_lun_thread, lun,
			"tcm_cl_%u", lun->unpacked_lun);
	if (IS_ERR(kt)) {
		pr_err("Unable to start clear_lun thread\n");
		return PTR_ERR(kt);
	}
	wait_for_completion(&lun->lun_shutdown_comp);

	return 0;
}

/**
 * transport_wait_for_tasks - wait for completion to occur
 * @cmd:	command to wait
 *
 * Called from frontend fabric context to wait for storage engine
 * to pause and/or release frontend generated struct se_cmd.
 */
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}
	/*
	 * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
	 * has been set in transport_set_supported_SAM_opcode().
	 */
	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}
	/*
	 * If we are already stopped due to an external event (ie: LUN shutdown)
	 * sleep until the connection can have the passed struct se_cmd back.
	 * The cmd->transport_lun_stopped_sem will be upped by
	 * transport_clear_lun_from_sessions() once the ConfigFS context caller
	 * has completed its operation on the struct se_cmd.
	 */
	if (cmd->transport_state & CMD_T_LUN_STOP) {
		pr_debug("wait_for_tasks: Stopping"
			" wait_for_completion(&cmd->t_tasktransport_lun_fe"
			"_stop_comp); for ITT: 0x%08x\n",
			cmd->se_tfo->get_task_tag(cmd));
		/*
		 * There is a special case for WRITES where a FE exception +
		 * LUN shutdown means ConfigFS context is still sleeping on
		 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
		 * We go ahead and up transport_lun_stop_comp just to be sure
		 * here.
		 */
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->transport_lun_stop_comp);
		wait_for_completion(&cmd->transport_lun_fe_stop_comp);
		spin_lock_irqsave(&cmd->t_state_lock, flags);

		transport_all_task_dev_remove_state(cmd);
		/*
		 * At this point, the frontend who was the originator of this
		 * struct se_cmd, now owns the structure and can be released through
		 * normal means below.
		 */
		pr_debug("wait_for_tasks: Stopped"
			" wait_for_completion(&cmd->t_tasktransport_lun_fe_"
			"stop_comp); for ITT: 0x%08x\n",
			cmd->se_tfo->get_task_tag(cmd));

		cmd->transport_state &= ~CMD_T_LUN_STOP;
	}

	if (!(cmd->transport_state & CMD_T_ACTIVE)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	cmd->transport_state |= CMD_T_STOP;

	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
		" i_state: %d, t_state: %d, CMD_T_STOP\n",
		cmd, cmd->se_tfo->get_task_tag(cmd),
		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);

	wait_for_completion(&cmd->t_transport_stop_comp);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);

	pr_debug("wait_for_tasks: Stopped wait_for_completion("
		"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
		cmd->se_tfo->get_task_tag(cmd));

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return true;
}
EXPORT_SYMBOL(transport_wait_for_tasks);

static int transport_get_sense_codes(
	struct se_cmd *cmd,
	u8 *asc,
	u8 *ascq)
{
	*asc = cmd->scsi_asc;
	*ascq = cmd->scsi_ascq;

	return 0;
}

static int transport_set_sense_codes(
	struct se_cmd *cmd,
	u8 asc,
	u8 ascq)
{
	cmd->scsi_asc = asc;
	cmd->scsi_ascq = ascq;

	return 0;
}

int transport_send_check_condition_and_sense(
	struct se_cmd *cmd,
	u8 reason,
	int from_transport)
{
	unsigned char *buffer = cmd->sense_buffer;
	unsigned long flags;
	int offset;
	u8 asc = 0, ascq = 0;


	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}
	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (!reason && from_transport)
		goto after_reason;

	if (!from_transport)
		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
	/*
	 * Data Segment and SenseLength of the fabric response PDU.
	 *
	 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
	 * from include/scsi/scsi_cmnd.h
	 */
	offset = cmd->se_tfo->set_fabric_sense_len(cmd,
				TRANSPORT_SENSE_BUFFER);
	/*
	 * Actual SENSE DATA, see SPC-3 7.23.2  SPC_SENSE_KEY_OFFSET uses
	 * SENSE KEY values from include/scsi/scsi.h
	 */
	switch (reason) {
	case TCM_NON_EXISTENT_LUN:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT NOT SUPPORTED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
		break;
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_SECTOR_COUNT_TOO_MANY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID COMMAND OPERATION CODE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
		break;
	case TCM_UNKNOWN_MODE_PAGE:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_CHECK_CONDITION_ABORT_CMD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* BUS DEVICE RESET FUNCTION OCCURRED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
		break;
	case TCM_INCORRECT_AMOUNT_OF_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* NOT ENOUGH UNSOLICITED DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
		break;
	case TCM_INVALID_CDB_FIELD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_INVALID_PARAMETER_LIST:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN PARAMETER LIST */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
		break;
	case TCM_UNEXPECTED_UNSOLICITED_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* UNEXPECTED_UNSOLICITED_DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
		break;
	case TCM_SERVICE_CRC_ERROR:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* PROTOCOL SERVICE CRC ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
		/* N/A */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
		break;
	case TCM_SNACK_REJECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* READ ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
		/* FAILED RETRANSMISSION REQUEST */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
		break;
	case TCM_WRITE_PROTECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* DATA PROTECT */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
		/* WRITE PROTECTED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
		break;
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* UNIT ATTENTION */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_CHECK_CONDITION_NOT_READY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* Not Ready */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
		transport_get_sense_codes(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	default:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT COMMUNICATION FAILURE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
		break;
	}
	/*
	 * This code uses linux/include/scsi/scsi.h SAM status codes!
	 */
	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	/*
	 * Automatically padded, this value is encoded in the fabric's
	 * data_length response PDU containing the SCSI defined sense data.
	 */
	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;

after_reason:
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
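
/*
 * Editor's sketch (#if 0, hypothetical my_fabric_* name): rejecting a CDB
 * from fabric context.  The reason code selects the sense data built above;
 * from_transport=0 marks the sense as emulated by the core.
 */
#if 0
static int my_fabric_reject_cdb(struct se_cmd *cmd)
{
	/* Builds ILLEGAL REQUEST / INVALID FIELD IN CDB sense, then queues
	 * the status; -EAGAIN/-ENOMEM signal a fabric QUEUE_FULL condition */
	return transport_send_check_condition_and_sense(cmd,
			TCM_INVALID_CDB_FIELD, 0);
}
#endif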

int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
	int ret = 0;

	if (cmd->transport_state & CMD_T_ABORTED) {
		if (!send_status ||
		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
			return 1;

		pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
			" status for CDB: 0x%02x ITT: 0x%08x\n",
			cmd->t_task_cdb[0],
			cmd->se_tfo->get_task_tag(cmd));

		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
		cmd->se_tfo->queue_status(cmd);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL(transport_check_aborted_status);

void transport_send_task_abort(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * If there are still expected incoming fabric WRITEs, we wait
	 * until they have completed before sending a TASK_ABORTED
	 * response.  This response with TASK_ABORTED status will be
	 * queued back to fabric module by transport_check_aborted_status().
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
			cmd->transport_state |= CMD_T_ABORTED;
			smp_mb__after_atomic_inc();
		}
	}
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;

	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
		" ITT: 0x%08x\n", cmd->t_task_cdb[0],
		cmd->se_tfo->get_task_tag(cmd));

	cmd->se_tfo->queue_status(cmd);
}

static int transport_generic_do_tmr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_tmr_req *tmr = cmd->se_tmr_req;
	int ret;

	switch (tmr->function) {
	case TMR_ABORT_TASK:
		core_tmr_abort_task(dev, tmr, cmd->se_sess);
		break;
	case TMR_ABORT_TASK_SET:
	case TMR_CLEAR_ACA:
	case TMR_CLEAR_TASK_SET:
		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		break;
	case TMR_LUN_RESET:
		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
					 TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_WARM_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_COLD_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	default:
		pr_err("Unknown TMR function: 0x%02x.\n",
				tmr->function);
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	}

	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
	cmd->se_tfo->queue_tm_rsp(cmd);

	transport_cmd_check_stop_to_fabric(cmd);
	return 0;
}

/*	transport_processing_thread():
 *
 *
 */
static int transport_processing_thread(void *param)
{
	int ret;
	struct se_cmd *cmd;
	struct se_device *dev = param;

	while (!kthread_should_stop()) {
		ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
				atomic_read(&dev->dev_queue_obj.queue_cnt) ||
				kthread_should_stop());
		if (ret < 0)
			goto out;

get_cmd:
		cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
		if (!cmd)
			continue;

		switch (cmd->t_state) {
		case TRANSPORT_NEW_CMD:
			BUG();
			break;
		case TRANSPORT_NEW_CMD_MAP:
			if (!cmd->se_tfo->new_cmd_map) {
				pr_err("cmd->se_tfo->new_cmd_map is"
					" NULL for TRANSPORT_NEW_CMD_MAP\n");
				BUG();
			}
			ret = cmd->se_tfo->new_cmd_map(cmd);
			if (ret < 0) {
				transport_generic_request_failure(cmd);
				break;
			}
			ret = transport_generic_new_cmd(cmd);
			if (ret < 0) {
				transport_generic_request_failure(cmd);
				break;
			}
			break;
		case TRANSPORT_PROCESS_WRITE:
			transport_generic_process_write(cmd);
			break;
		case TRANSPORT_PROCESS_TMR:
			transport_generic_do_tmr(cmd);
			break;
		case TRANSPORT_COMPLETE_QF_WP:
			transport_write_pending_qf(cmd);
			break;
		case TRANSPORT_COMPLETE_QF_OK:
			transport_complete_qf(cmd);
			break;
		default:
			pr_err("Unknown t_state: %d  for ITT: 0x%08x "
				"i_state: %d on SE LUN: %u\n",
				cmd->t_state,
				cmd->se_tfo->get_task_tag(cmd),
				cmd->se_tfo->get_cmd_state(cmd),
				cmd->se_lun->unpacked_lun);
			BUG();
		}

		goto get_cmd;
	}

out:
	WARN_ON(!list_empty(&dev->state_task_list));
	WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
	dev->process_thread = NULL;
	return 0;
}