/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static int sub_api_initialized;

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;

static int transport_generic_write_pending(struct se_cmd *);
static int transport_processing_thread(void *param);
static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev);
static void transport_free_dev_tasks(struct se_cmd *cmd);
static int transport_generic_get_mem(struct se_cmd *cmd);
static void transport_put_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session"
				" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration"
				" failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
				" failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
				"cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
			"t10_alua_tg_pt_gp_mem_cache",
			sizeof(struct t10_alua_tg_pt_gp_member),
			__alignof__(struct t10_alua_tg_pt_gp_member),
			0, NULL);
	if (!t10_alua_tg_pt_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"mem_t failed\n");
		goto out_free_tg_pt_gp_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_tg_pt_gp_mem_cache;

	return 0;

out_free_tg_pt_gp_mem_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}

static void transport_init_queue_obj(struct se_queue_obj *qobj)
{
	atomic_set(&qobj->queue_cnt, 0);
	INIT_LIST_HEAD(&qobj->qobj_list);
	init_waitqueue_head(&qobj->thread_wq);
	spin_lock_init(&qobj->cmd_queue_lock);
}

void transport_subsystem_check_init(void)
{
	int ret;

	if (sub_api_initialized)
		return;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = request_module("target_core_stgt");
	if (ret != 0)
		pr_err("Unable to load target_core_stgt\n");

	sub_api_initialized = 1;
	return;
}

struct se_session *transport_init_session(void)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from"
				" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	INIT_LIST_HEAD(&se_sess->sess_wait_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
	kref_init(&se_sess->sess_kref);

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);
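
/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * fabric module might allocate and publish a session during login using
 * the helpers above.  The example_* names are assumptions for illustration;
 * error handling is reduced to the essentials.
 */
#if 0
static int example_fabric_make_nexus(struct se_portal_group *se_tpg,
				     struct se_node_acl *se_nacl,
				     void *fabric_sess_ptr)
{
	struct se_session *se_sess;

	se_sess = transport_init_session();
	if (IS_ERR(se_sess))
		return PTR_ERR(se_sess);
	/* Takes se_tpg->session_lock internally */
	transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	return 0;
}
#endif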

/*
 * Called with struct se_portal_group->session_lock held via spin_lock_irqsave().
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}
		kref_get(&se_nacl->acl_kref);

		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned long flags;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);

static void target_release_session(struct kref *kref)
{
	struct se_session *se_sess = container_of(kref,
			struct se_session, sess_kref);
	struct se_portal_group *se_tpg = se_sess->se_tpg;

	se_tpg->se_tpg_tfo->close_session(se_sess);
}

void target_get_session(struct se_session *se_sess)
{
	kref_get(&se_sess->sess_kref);
}
EXPORT_SYMBOL(target_get_session);

int target_put_session(struct se_session *se_sess)
{
	return kref_put(&se_sess->sess_kref, target_release_session);
}
EXPORT_SYMBOL(target_put_session);
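
/*
 * Illustrative sketch (not part of the original file): the intended pairing
 * of the session kref helpers above.  A hypothetical fabric pins the session
 * before deferring work against it; the final put ends up in
 * target_release_session() and thus ->close_session().
 */
#if 0
static void example_fabric_defer_work(struct se_session *se_sess)
{
	target_get_session(se_sess);
	/* ... hand se_sess off to a workqueue or timer context ... */
	target_put_session(se_sess);
}
#endif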

static void target_complete_nacl(struct kref *kref)
{
	struct se_node_acl *nacl = container_of(kref,
				struct se_node_acl, acl_kref);

	complete(&nacl->acl_free_comp);
}

void target_put_nacl(struct se_node_acl *nacl)
{
	kref_put(&nacl->acl_kref, target_complete_nacl);
}

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		if (se_nacl->acl_stop == 0)
			list_del(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	struct target_core_fabric_ops *se_tfo;
	struct se_node_acl *se_nacl;
	unsigned long flags;
	bool comp_nacl = true;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}
	se_tfo = se_tpg->se_tpg_tfo;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	/*
	 * Determine if we need to do extra work for this initiator node's
	 * struct se_node_acl if it had been previously dynamically generated.
	 */
	se_nacl = se_sess->se_node_acl;

	spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
	if (se_nacl && se_nacl->dynamic_node_acl) {
		if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
			list_del(&se_nacl->acl_list);
			se_tpg->num_node_acls--;
			spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
			core_tpg_wait_for_nacl_pr_ref(se_nacl);
			core_free_device_list_for_node(se_nacl, se_tpg);
			se_tfo->tpg_release_fabric_acl(se_tpg, se_nacl);

			comp_nacl = false;
			spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
		}
	}
	spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);

	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		se_tpg->se_tpg_tfo->get_fabric_name());
	/*
	 * If the last kref is dropping now for an explicit NodeACL, awake sleeping
	 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
	 * removal context.
	 */
	if (se_nacl && comp_nacl == true)
		target_put_nacl(se_nacl);

	transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);

/*
 * Called with cmd->t_state_lock held.
 */
static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_task *task;
	unsigned long flags;

	if (!dev)
		return;

	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		if (task->task_flags & TF_ACTIVE)
			continue;

		spin_lock_irqsave(&dev->execute_task_lock, flags);
		if (task->t_state_active) {
			pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
				cmd->se_tfo->get_task_tag(cmd), dev, task);

			list_del(&task->t_state_list);
			atomic_dec(&cmd->t_task_cdbs_ex_left);
			task->t_state_active = false;
		}
		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
	}

}

/*	transport_cmd_check_stop():
 *
 *	'transport_off = 1' determines if CMD_T_ACTIVE should be cleared.
 *	'transport_off = 2' determines if task_dev_state should be removed.
 *
 *	A non-zero u8 t_state sets cmd->t_state.
 *	Returns 1 when command is stopped, else 0.
 */
static int transport_cmd_check_stop(
	struct se_cmd *cmd,
	int transport_off,
	u8 t_state)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	/*
	 * Determine if an IOCTL context caller is requesting the stopping of this
	 * command for LUN shutdown purposes.
	 */
	if (cmd->transport_state & CMD_T_LUN_STOP) {
		pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
			__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));

		cmd->transport_state &= ~CMD_T_ACTIVE;
		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->transport_lun_stop_comp);
		return 1;
	}
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
			__func__, __LINE__,
			cmd->se_tfo->get_task_tag(cmd));

		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);

		/*
		 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
		 * to FE.
		 */
		if (transport_off == 2)
			cmd->se_lun = NULL;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->t_transport_stop_comp);
		return 1;
	}
	if (transport_off) {
		cmd->transport_state &= ~CMD_T_ACTIVE;
		if (transport_off == 2) {
			transport_all_task_dev_remove_state(cmd);
			/*
			 * Clear struct se_cmd->se_lun before the transport_off == 2
			 * handoff to fabric module.
			 */
			cmd->se_lun = NULL;
			/*
			 * Some fabric modules like tcm_loop can release
			 * their internally allocated I/O reference now and
			 * struct se_cmd now.
			 *
			 * Fabric modules are expected to return '1' here if the
			 * se_cmd being passed is released at this point,
			 * or zero if not being released.
			 */
			if (cmd->se_tfo->check_stop_free != NULL) {
				spin_unlock_irqrestore(
					&cmd->t_state_lock, flags);

				return cmd->se_tfo->check_stop_free(cmd);
			}
		}
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		return 0;
	} else if (t_state)
		cmd->t_state = t_state;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return 0;
}

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	return transport_cmd_check_stop(cmd, 2, 0);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;
	unsigned long flags;

	if (!lun)
		return;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		transport_all_task_dev_remove_state(cmd);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
	if (!list_empty(&cmd->se_lun_node))
		list_del_init(&cmd->se_lun_node);
	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
}

void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
		transport_lun_remove_cmd(cmd);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
	if (remove) {
		transport_remove_cmd_from_queue(cmd);
		transport_put_cmd(cmd);
	}
}

static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
		bool at_head)
{
	struct se_device *dev = cmd->se_dev;
	struct se_queue_obj *qobj = &dev->dev_queue_obj;
	unsigned long flags;

	if (t_state) {
		spin_lock_irqsave(&cmd->t_state_lock, flags);
		cmd->t_state = t_state;
		cmd->transport_state |= CMD_T_ACTIVE;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	}

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);

	/* If the cmd is already on the list, remove it before we add it */
	if (!list_empty(&cmd->se_queue_node))
		list_del(&cmd->se_queue_node);
	else
		atomic_inc(&qobj->queue_cnt);

	if (at_head)
		list_add(&cmd->se_queue_node, &qobj->qobj_list);
	else
		list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
	cmd->transport_state |= CMD_T_QUEUED;
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	wake_up_interruptible(&qobj->thread_wq);
}

static struct se_cmd *
transport_get_cmd_from_queue(struct se_queue_obj *qobj)
{
	struct se_cmd *cmd;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (list_empty(&qobj->qobj_list)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return NULL;
	}
	cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);

	cmd->transport_state &= ~CMD_T_QUEUED;
	list_del_init(&cmd->se_queue_node);
	atomic_dec(&qobj->queue_cnt);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	return cmd;
}

static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
{
	struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (!(cmd->transport_state & CMD_T_QUEUED)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return;
	}
	cmd->transport_state &= ~CMD_T_QUEUED;
	atomic_dec(&qobj->queue_cnt);
	list_del_init(&cmd->se_queue_node);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
}

/*
 * Completion function used by TCM subsystem plugins (such as FILEIO)
 * for queueing up response from struct se_subsystem_api->do_task()
 */
void transport_complete_sync_cache(struct se_cmd *cmd, int good)
{
	struct se_task *task = list_entry(cmd->t_task_list.next,
				struct se_task, t_list);

	if (good) {
		cmd->scsi_status = SAM_STAT_GOOD;
		task->task_scsi_status = GOOD;
	} else {
		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
		task->task_se_cmd->scsi_sense_reason =
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	}

	transport_complete_task(task, good);
}
EXPORT_SYMBOL(transport_complete_sync_cache);
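
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * backend's SYNCHRONIZE_CACHE completion path, mapping its internal error
 * code onto the 'good' argument of the helper above.
 */
#if 0
static void example_backend_sync_cache_done(struct se_cmd *cmd, int error)
{
	transport_complete_sync_cache(cmd, error == 0);
}
#endif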

static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd);
}

/*	transport_complete_task():
 *
 *	Called from interrupt and non interrupt context depending
 *	on the transport plugin.
 */
void transport_complete_task(struct se_task *task, int success)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	task->task_flags &= ~TF_ACTIVE;

	/*
	 * See if any sense data exists, if so set the TASK_SENSE flag.
	 * Also check for any other post completion work that needs to be
	 * done by the plugins.
	 */
	if (dev && dev->transport->transport_complete) {
		if (dev->transport->transport_complete(task) != 0) {
			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
724
			task->task_flags |= TF_HAS_SENSE;
725 726 727 728 729 730 731 732
			success = 1;
		}
	}

	/*
	 * See if we are waiting for outstanding struct se_task
	 * to complete for an exception condition
	 */
	if (task->task_flags & TF_REQUEST_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&task->task_stop_comp);
		return;
	}

	if (!success)
		cmd->transport_state |= CMD_T_FAILED;

	/*
	 * Decrement the outstanding t_task_cdbs_left count.  The last
	 * struct se_task from struct se_cmd will complete itself into the
	 * device queue depending upon int success.
	 */
	if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	/*
	 * Check for the case where an explicit ABORT_TASK has been received
	 * and transport_wait_for_tasks() will be waiting for completion.
	 */
	if (cmd->transport_state & CMD_T_ABORTED &&
	    cmd->transport_state & CMD_T_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->t_transport_stop_comp);
		return;
	} else if (cmd->transport_state & CMD_T_FAILED) {
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		INIT_WORK(&cmd->work, target_complete_failure_work);
	} else {
		INIT_WORK(&cmd->work, target_complete_ok_work);
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(transport_complete_task);
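
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * backend plugin completing a struct se_task from its I/O done callback,
 * which per the comment above may run in interrupt context.
 */
#if 0
static void example_backend_io_done(struct se_task *task, int error)
{
	transport_complete_task(task, error == 0);
}
#endif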

/*
 * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
 * struct se_task list is ready to be added to the active execution list
 * of a struct se_device.
 *
 * Called with se_dev_t->execute_task_lock held.
 */
static inline int transport_add_task_check_sam_attr(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	/*
	 * No SAM Task attribute emulation enabled, add to tail of
	 * execution queue
	 */
	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
		list_add_tail(&task->t_execute_list, &dev->execute_task_list);
		return 0;
	}
	/*
	 * HEAD_OF_QUEUE attribute for received CDB, which means
	 * the first task that is associated with a struct se_cmd goes to
	 * head of the struct se_device->execute_task_list, and task_prev
	 * after that for each subsequent task
	 */
	if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
		list_add(&task->t_execute_list,
				(task_prev != NULL) ?
				&task_prev->t_execute_list :
				&dev->execute_task_list);

		pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
				" in execution queue\n",
				task->task_se_cmd->t_task_cdb[0]);
		return 1;
	}
	/*
	 * For ORDERED, SIMPLE or UNTAGGED attribute tasks once they have been
	 * transitioned from Dormant -> Active state, and are added to the end
	 * of the struct se_device->execute_task_list
	 */
	list_add_tail(&task->t_execute_list, &dev->execute_task_list);
	return 0;
}

/*	__transport_add_task_to_execute_queue():
 *
 *	Called with se_dev_t->execute_task_lock held.
 */
static void __transport_add_task_to_execute_queue(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	int head_of_queue;

	head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
	atomic_inc(&dev->execute_tasks);

	if (task->t_state_active)
		return;
	/*
	 * Determine if this task needs to go to HEAD_OF_QUEUE for the
	 * state list as well.  Running with SAM Task Attribute emulation
	 * will always return head_of_queue == 0 here
	 */
	if (head_of_queue)
		list_add(&task->t_state_list, (task_prev) ?
				&task_prev->t_state_list :
				&dev->state_task_list);
	else
		list_add_tail(&task->t_state_list, &dev->state_task_list);

	task->t_state_active = true;

	pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
		task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
		task, dev);
}

static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_task *task;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		spin_lock(&dev->execute_task_lock);
		if (!task->t_state_active) {
			list_add_tail(&task->t_state_list,
				      &dev->state_task_list);
			task->t_state_active = true;

			pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
				task->task_se_cmd->se_tfo->get_task_tag(
				task->task_se_cmd), task, dev);
		}
		spin_unlock(&dev->execute_task_lock);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

static void __transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_task *task, *task_prev = NULL;

	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		if (!list_empty(&task->t_execute_list))
			continue;
		/*
		 * __transport_add_task_to_execute_queue() handles the
		 * SAM Task Attribute emulation if enabled
		 */
		__transport_add_task_to_execute_queue(task, task_prev, dev);
		task_prev = task;
	}
}

static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
	unsigned long flags;
	struct se_device *dev = cmd->se_dev;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	__transport_add_tasks_from_cmd(cmd);
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

void __transport_remove_task_from_execute_queue(struct se_task *task,
		struct se_device *dev)
{
	list_del_init(&task->t_execute_list);
	atomic_dec(&dev->execute_tasks);
}

static void transport_remove_task_from_execute_queue(
	struct se_task *task,
	struct se_device *dev)
{
	unsigned long flags;

	if (WARN_ON(list_empty(&task->t_execute_list)))
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	__transport_remove_task_from_execute_queue(task, dev);
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 */

static void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
					qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec(&dev->dev_qf_count);
		smp_mb__after_atomic_dec();

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			: "UNKNOWN");

		transport_add_cmd_to_queue(cmd, cmd->t_state, true);
	}
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	switch (dev->dev_status) {
	case TRANSPORT_DEVICE_ACTIVATED:
		*bl += sprintf(b + *bl, "ACTIVATED");
		break;
	case TRANSPORT_DEVICE_DEACTIVATED:
		*bl += sprintf(b + *bl, "DEACTIVATED");
		break;
	case TRANSPORT_DEVICE_SHUTDOWN:
		*bl += sprintf(b + *bl, "SHUTDOWN");
		break;
	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
		*bl += sprintf(b + *bl, "OFFLINE");
		break;
	default:
		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
		break;
	}

	*bl += sprintf(b + *bl, "  Execute/Max Queue Depth: %d/%d",
		atomic_read(&dev->execute_tasks), dev->queue_depth);
	*bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
		dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
	*bl += sprintf(b + *bl, "        ");
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access"
				" Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport"
				" Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set..
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	 if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
				hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
				hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);
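
/*
 * Illustrative sketch (not part of the original file): decoding a single
 * INQUIRY VPD page 0x83 designation descriptor with the four helpers above.
 * 'desc' is assumed to point at the start of one descriptor, i.e. the same
 * layout the helpers index as page_83[0], page_83[1], ...
 */
#if 0
static void example_parse_vpd_descriptor(struct t10_vpd *vpd,
					 unsigned char *desc)
{
	transport_set_vpd_proto_id(vpd, desc);	 /* byte 0, if PIV is set */
	transport_set_vpd_assoc(vpd, desc);	 /* byte 1, bits 5:4 */
	transport_set_vpd_ident_type(vpd, desc); /* byte 1, bits 3:0 */
	transport_set_vpd_ident(vpd, desc);	 /* bytes 4..4+length-1 */
}
#endif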

static void core_setup_task_attr_emulation(struct se_device *dev)
{
	/*
	 * If this device is from Target_Core_Mod/pSCSI, disable the
	 * SAM Task Attribute emulation.
	 *
	 * This is currently not available in upstream Linux/SCSI Target
	 * mode code, and is assumed to be disabled while using TCM/pSCSI.
	 */
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
		return;
	}

	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
	pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
		" device\n", dev->transport->name,
		dev->transport->get_device_rev(dev));
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
	pr_debug("                 ANSI SCSI revision: %02x\n",
				dev->transport->get_device_rev(dev));
}

struct se_device *transport_add_device_to_core_hba(
	struct se_hba *hba,
	struct se_subsystem_api *transport,
	struct se_subsystem_dev *se_dev,
	u32 device_flags,
	void *transport_dev,
	struct se_dev_limits *dev_limits,
	const char *inquiry_prod,
	const char *inquiry_rev)
{
	int force_pt;
	struct se_device  *dev;

	dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
	if (!dev) {
		pr_err("Unable to allocate memory for se_dev_t\n");
		return NULL;
	}

	transport_init_queue_obj(&dev->dev_queue_obj);
	dev->dev_flags		= device_flags;
	dev->dev_status		|= TRANSPORT_DEVICE_DEACTIVATED;
	dev->dev_ptr		= transport_dev;
	dev->se_hba		= hba;
	dev->se_sub_dev		= se_dev;
	dev->transport		= transport;
	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->execute_task_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_task_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->dev_status_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	atomic_set(&dev->dev_ordered_id, 0);

	se_dev_set_default_attribs(dev, dev_limits);

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();
	spin_lock_init(&dev->stats_lock);

	spin_lock(&hba->device_lock);
	list_add_tail(&dev->dev_list, &hba->hba_dev_list);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);
	/*
	 * Setup the SAM Task Attribute emulation for struct se_device
	 */
	core_setup_task_attr_emulation(dev);
	/*
	 * Force PR and ALUA passthrough emulation with internal object use.
	 */
	force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
	/*
	 * Setup the Reservations infrastructure for struct se_device
	 */
	core_setup_reservations(dev, force_pt);
	/*
	 * Setup the Asymmetric Logical Unit Assignment for struct se_device
	 */
	if (core_setup_alua(dev, force_pt) < 0)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->process_thread = kthread_run(transport_processing_thread, dev,
					  "LIO_%s", dev->transport->name);
	if (IS_ERR(dev->process_thread)) {
		pr_err("Unable to create kthread: LIO_%s\n",
			dev->transport->name);
		goto out;
	}
	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 * This is required so that transport_get_inquiry() copies these
	 * originals once back into DEV_T10_WWN(dev) for the virtual device
	 * setup.
	 */
	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (!inquiry_prod || !inquiry_rev) {
			pr_err("All non TCM/pSCSI plugins require"
				" INQUIRY consts\n");
			goto out;
		}

		strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
		strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
	}
	scsi_dump_inquiry(dev);

	return dev;
out:
	kthread_stop(dev->process_thread);

	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	se_release_vpd_for_dev(dev);

	kfree(dev);

	return NULL;
}
EXPORT_SYMBOL(transport_add_device_to_core_hba);

/*	transport_generic_prepare_cdb():
 *
 *	Since the Initiator sees iSCSI devices as LUNs,  the SCSI CDB will
 *	contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
 *	The point of this is since we are mapping iSCSI LUNs to
 *	SCSI Target IDs having a non-zero LUN in the CDB will throw the
 *	devices and HBAs for a loop.
 */
static inline void transport_generic_prepare_cdb(
	unsigned char *cdb)
{
	switch (cdb[0]) {
	case READ_10: /* SBC - RDProtect */
	case READ_12: /* SBC - RDProtect */
	case READ_16: /* SBC - RDProtect */
	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
	case VERIFY: /* SBC - VRProtect */
	case VERIFY_16: /* SBC - VRProtect */
	case WRITE_VERIFY: /* SBC - VRProtect */
	case WRITE_VERIFY_12: /* SBC - VRProtect */
		break;
	default:
		cdb[1] &= 0x1f; /* clear logical unit number */
		break;
	}
}

static struct se_task *
transport_generic_get_task(struct se_cmd *cmd,
		enum dma_data_direction data_direction)
{
	struct se_task *task;
	struct se_device *dev = cmd->se_dev;

	task = dev->transport->alloc_task(cmd->t_task_cdb);
	if (!task) {
		pr_err("Unable to allocate struct se_task\n");
		return NULL;
	}

	INIT_LIST_HEAD(&task->t_list);
	INIT_LIST_HEAD(&task->t_execute_list);
	INIT_LIST_HEAD(&task->t_state_list);
	init_completion(&task->task_stop_comp);
	task->task_se_cmd = cmd;
	task->task_data_direction = data_direction;

	return task;
}

static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);

/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
	struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer)
{
	INIT_LIST_HEAD(&cmd->se_lun_node);
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_queue_node);
	INIT_LIST_HEAD(&cmd->se_cmd_list);
	INIT_LIST_HEAD(&cmd->t_task_list);
	init_completion(&cmd->transport_lun_fe_stop_comp);
	init_completion(&cmd->transport_lun_stop_comp);
	init_completion(&cmd->t_transport_stop_comp);
	init_completion(&cmd->cmd_wait_comp);
	spin_lock_init(&cmd->t_state_lock);
	cmd->transport_state = CMD_T_DEV_ACTIVE;

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;
}
EXPORT_SYMBOL(transport_init_se_cmd);

static int transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 0;

	if (cmd->sam_task_attr == MSG_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA"
			" emulation is not supported\n");
		return -EINVAL;
	}
	/*
	 * Used to determine when ORDERED commands should go from
	 * Dormant to Active status.
	 */
	cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
	smp_mb__after_atomic_inc();
	pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
			cmd->se_ordered_id, cmd->sam_task_attr,
			cmd->se_dev->transport->name);
	return 0;
}

/*	target_setup_cmd_from_cdb():
 *
 *	Called from fabric RX Thread.
 */
int target_setup_cmd_from_cdb(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	int ret;

	transport_generic_prepare_cdb(cdb);
	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now..  Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
						GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb"
				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(cmd->__t_task_cdb));
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason =
					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
			return -ENOMEM;
		}
	} else
		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Copy the original CDB into cmd->t_task_cdb.
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
	/*
	 * Setup the received CDB based on SCSI defined opcodes and
	 * perform unit attention, persistent reservations and ALUA
	 * checks for virtual device backends.  The cmd->t_task_cdb
	 * pointer is expected to be setup before we reach this point.
	 */
	ret = transport_generic_cmd_sequencer(cmd, cdb);
	if (ret < 0)
		return ret;
	/*
	 * Check for SAM Task Attribute Emulation
	 */
	if (transport_check_alloc_task_attr(cmd) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}
	spin_lock(&cmd->se_lun->lun_sep_lock);
	if (cmd->se_lun->lun_sep)
		cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
	spin_unlock(&cmd->se_lun->lun_sep_lock);
	return 0;
}
EXPORT_SYMBOL(target_setup_cmd_from_cdb);

/*
 * Used by fabric module frontends to queue tasks directly.
 * May only be called from process context.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	int ret;

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}
	if (in_interrupt()) {
		dump_stack();
		pr_err("transport_generic_handle_cdb cannot be called"
				" from interrupt context\n");
		return -EINVAL;
	}
	/*
	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE following
	 * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
	 * in existing usage to ensure that outstanding descriptors are handled
	 * correctly during shutdown via transport_wait_for_tasks()
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	cmd->transport_state |= CMD_T_ACTIVE;

	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary..
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret < 0)
		transport_generic_request_failure(cmd);

	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);

/**
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 **/
void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{
	struct se_portal_group *se_tpg;
	int rc;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
	BUG_ON(in_interrupt());
	/*
	 * Initialize se_cmd for target operation.  From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
				data_length, data_dir, task_attr, sense);
	if (flags & TARGET_SCF_UNKNOWN_SIZE)
		se_cmd->unknown_data_length = 1;
	/*
	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
	 * se_sess->sess_cmd_list.  A second kref_get here is necessary
	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
	 */
	target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
	/*
	 * Signal bidirectional data payloads to target-core
	 */
	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;
	/*
	 * Locate se_lun pointer and attach it to struct se_cmd
	 */
	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
		transport_send_check_condition_and_sense(se_cmd,
				se_cmd->scsi_sense_reason, 0);
		target_put_sess_cmd(se_sess, se_cmd);
		return;
	}
	/*
	 * Sanitize CDBs via transport_generic_cmd_sequencer() and
	 * allocate the necessary tasks to complete the received CDB+data
	 */
	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
	if (rc != 0) {
		transport_generic_request_failure(se_cmd);
		return;
	}
	/*
	 * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
	 * for immediate execution of READs, otherwise wait for
	 * transport_generic_handle_data() to be called for WRITEs
	 * when fabric has filled the incoming buffer.
	 */
	transport_handle_cdb_direct(se_cmd);
	return;
}
EXPORT_SYMBOL(target_submit_cmd);
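
/*
 * Illustrative sketch (not part of the original file): the dispatch a
 * hypothetical fabric driver might perform once it has decoded a SCSI
 * command PDU.  se_cmd is assumed to be embedded in the fabric's per-I/O
 * descriptor and zeroed before submission; MSG_SIMPLE_TAG and flags == 0
 * are illustrative defaults.
 */
#if 0
static void example_fabric_dispatch_cmd(struct se_session *se_sess,
					struct se_cmd *se_cmd,
					unsigned char *cdb,
					unsigned char *sense,
					u32 unpacked_lun, u32 data_length,
					int data_dir)
{
	/* Exceptions after this point are reported via ->queue_status() */
	target_submit_cmd(se_cmd, se_sess, cdb, sense, unpacked_lun,
			  data_length, MSG_SIMPLE_TAG, data_dir, 0);
}
#endif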

static void target_complete_tmr_failure(struct work_struct *work)
{
	struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);

	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
	se_cmd->se_tfo->queue_tm_rsp(se_cmd);
	transport_generic_free_cmd(se_cmd, 0);
}

/**
 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
 *                     for TMR CDBs
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @fabric_context: fabric context for TMR req
 * @tm_type: Type of TM request
1766 1767
 * @gfp: gfp type for caller
 * @tag: referenced task tag for TMR_ABORT_TASK
1768
 * @flags: submit cmd flags
1769 1770 1771 1772
 *
 * Callable from all contexts.
 **/

1773
int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
1774
		unsigned char *sense, u32 unpacked_lun,
1775 1776
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t gfp, unsigned int tag, int flags)
1777 1778 1779 1780 1781 1782 1783 1784 1785
{
	struct se_portal_group *se_tpg;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);

	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			      0, DMA_NONE, MSG_SIMPLE_TAG, sense);
1786 1787 1788 1789
	/*
	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
	 * allocation failure.
	 */
1790
	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
1791 1792
	if (ret < 0)
		return -ENOMEM;
1793

1794 1795 1796
	if (tm_type == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = tag;

1797 1798 1799 1800 1801
	/* See target_submit_cmd for commentary */
	target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));

	ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
	if (ret) {
1802 1803 1804 1805 1806 1807
		/*
		 * For callback during failure handling, push this work off
		 * to process context with TMR_LUN_DOES_NOT_EXIST status.
		 */
		INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
		schedule_work(&se_cmd->work);
1808
		return 0;
1809 1810
	}
	transport_generic_handle_tmr(se_cmd);
1811
	return 0;
1812 1813 1814
}
EXPORT_SYMBOL(target_submit_tmr);
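
/*
 * Example usage (illustrative sketch only): submitting an ABORT_TASK TMR
 * from a hypothetical fabric module.  TMR_ABORT_TASK and the parameter
 * order mirror the function above; the my_fabric_* names are made up.
 *
 *	static int my_fabric_handle_abort(struct my_fabric_tmr *tmr)
 *	{
 *		return target_submit_tmr(&tmr->se_cmd, tmr->se_sess,
 *					 tmr->sense_buf, tmr->unpacked_lun,
 *					 tmr->fabric_tmr_ptr, TMR_ABORT_TASK,
 *					 GFP_KERNEL, tmr->ref_task_tag, 0);
 *	}
 */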

/*
 * Used by fabric module frontends defining a TFO->new_cmd_map() caller
 * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
 * complete setup in TCM process context w/ TFO->new_cmd_map().
 */
int transport_generic_handle_cdb_map(
	struct se_cmd *cmd)
{
	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}

	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_cdb_map);

/*	transport_generic_handle_data():
 *
 *
 */
int transport_generic_handle_data(
	struct se_cmd *cmd)
{
	/*
	 * For the software fabric case, then we assume the nexus is being
	 * failed/shutdown when signals are pending from the kthread context
	 * caller, so we return a failure.  For the HW target mode case running
	 * in interrupt code, the signal_pending() check is skipped.
	 */
	if (!in_interrupt() && signal_pending(current))
		return -EPERM;
	/*
	 * If the received CDB has already been ABORTED by the generic
	 * target engine, we now call transport_check_aborted_status()
	 * to queue any delayed TASK_ABORTED status for the received CDB to the
	 * fabric module as we are expecting no further incoming DATA OUT
	 * sequences at this point.
	 */
	if (transport_check_aborted_status(cmd, 1) != 0)
		return 0;

	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_data);

/*	transport_generic_handle_tmr():
 *
 *
 */
int transport_generic_handle_tmr(
	struct se_cmd *cmd)
{
	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);

/*
 * If the task is active, request it to be stopped and sleep until it
 * has completed.
 */
bool target_stop_task(struct se_task *task, unsigned long *flags)
{
	struct se_cmd *cmd = task->task_se_cmd;
	bool was_active = false;

	if (task->task_flags & TF_ACTIVE) {
		task->task_flags |= TF_REQUEST_STOP;
		spin_unlock_irqrestore(&cmd->t_state_lock, *flags);

		pr_debug("Task %p waiting to complete\n", task);
		wait_for_completion(&task->task_stop_comp);
		pr_debug("Task %p stopped successfully\n", task);

		spin_lock_irqsave(&cmd->t_state_lock, *flags);
		atomic_dec(&cmd->t_task_cdbs_left);
		task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP);
		was_active = true;
	}

	return was_active;
}

static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
{
	struct se_task *task, *task_tmp;
	unsigned long flags;
	int ret = 0;

	pr_debug("ITT[0x%08x] - Stopping tasks\n",
		cmd->se_tfo->get_task_tag(cmd));

	/*
	 * No tasks remain in the execution queue
	 */
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_for_each_entry_safe(task, task_tmp,
				&cmd->t_task_list, t_list) {
		pr_debug("Processing task %p\n", task);
		/*
		 * If the struct se_task has not been sent and is not active,
		 * remove the struct se_task from the execution queue.
		 */
		if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) {
			spin_unlock_irqrestore(&cmd->t_state_lock,
					flags);
			transport_remove_task_from_execute_queue(task,
					cmd->se_dev);

			pr_debug("Task %p removed from execute queue\n", task);
			spin_lock_irqsave(&cmd->t_state_lock, flags);
			continue;
		}

		if (!target_stop_task(task, &flags)) {
			pr_debug("Task %p - did nothing\n", task);
			ret++;
		}
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return ret;
}

/*
 * Handle SAM-esque emulation for generic transport request failures.
 */
void transport_generic_request_failure(struct se_cmd *cmd)
{
	int ret = 0;

	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
		cmd->t_task_cdb[0]);
	pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
		cmd->se_tfo->get_cmd_state(cmd),
		cmd->t_state, cmd->scsi_sense_reason);
	pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
		" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
		" CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
		cmd->t_task_list_num,
		atomic_read(&cmd->t_task_cdbs_left),
		atomic_read(&cmd->t_task_cdbs_sent),
		atomic_read(&cmd->t_task_cdbs_ex_left),
		(cmd->transport_state & CMD_T_ACTIVE) != 0,
		(cmd->transport_state & CMD_T_STOP) != 0,
		(cmd->transport_state & CMD_T_SENT) != 0);

	/*
	 * For SAM Task Attribute emulation for failed struct se_cmd
	 */
	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);

	switch (cmd->scsi_sense_reason) {
	case TCM_NON_EXISTENT_LUN:
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_INVALID_CDB_FIELD:
	case TCM_INVALID_PARAMETER_LIST:
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	case TCM_UNKNOWN_MODE_PAGE:
	case TCM_WRITE_PROTECTED:
	case TCM_CHECK_CONDITION_ABORT_CMD:
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
	case TCM_CHECK_CONDITION_NOT_READY:
		break;
	case TCM_RESERVATION_CONFLICT:
		/*
		 * No SENSE Data payload for this case, set SCSI Status
		 * and queue the response to $FABRIC_MOD.
		 *
		 * Uses linux/include/scsi/scsi.h SAM status codes defs
		 */
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
		/*
		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
		 * CONFLICT STATUS.
		 *
		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
		 */
		if (cmd->se_sess &&
		    cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
			core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
				cmd->orig_fe_lun, 0x2C,
				ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);

		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		goto check_stop;
	default:
		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
			cmd->t_task_cdb[0], cmd->scsi_sense_reason);
		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	}
	/*
	 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
	 * make the call to transport_send_check_condition_and_sense()
	 * directly.  Otherwise expect the fabric to make the call to
	 * transport_send_check_condition_and_sense() after handling
	 * possible unsolicited write data payloads.
	 */
	ret = transport_send_check_condition_and_sense(cmd,
			cmd->scsi_sense_reason, 0);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;

check_stop:
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}
EXPORT_SYMBOL(transport_generic_request_failure);

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}
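
/*
 * Worked example for the helpers above: a READ_10 CDB carries a 32-bit
 * big-endian LBA in bytes 2-5, so for cdb[2..5] = { 0x00, 0x00, 0x10, 0x00 }
 * transport_lba_32() returns (0x00 << 24) | (0x00 << 16) | (0x10 << 8) | 0x00
 * == 0x1000.  transport_lba_64() likewise treats cdb[2..5] as the high and
 * cdb[6..9] as the low 32 bits of a 64-bit LBA.
 */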

static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
	se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
}

/*
 * Called from Fabric Module context from transport_execute_tasks()
 *
 * The return of this function determines if the tasks from struct se_cmd
 * get added to the execution queue in transport_execute_tasks(),
 * or are added to the delayed or ordered lists here.
 */
static inline int transport_execute_task_attr(struct se_cmd *cmd)
{
	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 1;
	/*
	 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
	 * to move the passed struct se_cmd list of tasks to the front of
	 * the list.
	 */
	if (cmd->sam_task_attr == MSG_HEAD_TAG) {
		pr_debug("Added HEAD_OF_QUEUE for CDB:"
			" 0x%02x, se_ordered_id: %u\n",
			cmd->t_task_cdb[0],
			cmd->se_ordered_id);
		return 1;
	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
		atomic_inc(&cmd->se_dev->dev_ordered_sync);
		smp_mb__after_atomic_inc();

		pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
				" list, se_ordered_id: %u\n",
				cmd->t_task_cdb[0],
				cmd->se_ordered_id);
		/*
		 * Add ORDERED command to tail of execution queue if
		 * no other older commands exist that need to be
		 * completed first.
		 */
		if (!atomic_read(&cmd->se_dev->simple_cmds))
			return 1;
	} else {
		/*
		 * For SIMPLE and UNTAGGED Task Attribute commands
		 */
		atomic_inc(&cmd->se_dev->simple_cmds);
		smp_mb__after_atomic_inc();
	}
	/*
	 * Otherwise, if one or more outstanding ORDERED task attributes
	 * exist, defer the tasks built for the passed struct se_cmd until
	 * this struct se_device becomes Active again.
	 */
	if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
		/*
		 * Add cmd w/ tasks to the delayed cmd queue that will be
		 * drained upon completion of the HEAD_OF_QUEUE task.
		 */
		spin_lock(&cmd->se_dev->delayed_cmd_lock);
		cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
		list_add_tail(&cmd->se_delayed_node,
				&cmd->se_dev->delayed_cmd_list);
		spin_unlock(&cmd->se_dev->delayed_cmd_lock);

		pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
			" delayed CMD list, se_ordered_id: %u\n",
			cmd->t_task_cdb[0], cmd->sam_task_attr,
			cmd->se_ordered_id);
		/*
		 * Return zero to let transport_execute_tasks() know
		 * not to add the delayed tasks to the execution list.
		 */
		return 0;
	}
	/*
	 * Otherwise, no ORDERED task attributes exist.
	 */
	return 1;
}
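
/*
 * Worked example of the gating above (illustrative): with one ORDERED
 * command outstanding (dev_ordered_sync == 1), an incoming SIMPLE command
 * bumps dev->simple_cmds but returns 0 after being parked on
 * dev->delayed_cmd_list; it is drained by transport_complete_task_attr()
 * once the ORDERED command completes.  With no ORDERED commands pending,
 * the same SIMPLE command returns 1 and is queued for execution directly.
 */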

/*
 * Called from fabric module context in transport_generic_new_cmd() and
 * transport_generic_process_write()
 */
static int transport_execute_tasks(struct se_cmd *cmd)
{
	int add_tasks;
	struct se_device *se_dev = cmd->se_dev;
	/*
	 * Call transport_cmd_check_stop() to see if a fabric exception
	 * has occurred that prevents execution.
	 */
	if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
		/*
		 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
		 * attribute for the tasks of the received struct se_cmd CDB
		 */
		add_tasks = transport_execute_task_attr(cmd);
		if (!add_tasks)
			goto execute_tasks;
		/*
		 * __transport_execute_tasks() -> __transport_add_tasks_from_cmd()
		 * adds associated se_tasks while holding dev->execute_task_lock
		 * before I/O dispatch to avoid a double spinlock access.
		 */
		__transport_execute_tasks(se_dev, cmd);
		return 0;
	}

execute_tasks:
	__transport_execute_tasks(se_dev, NULL);
	return 0;
}

/*
 * Called to check the struct se_device tcq depth window, and once open
 * pull a struct se_task from struct se_device->execute_task_list and
 * dispatch it to the backend.
 *
 * Called from transport_processing_thread()
 */
static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
{
	int error;
	struct se_cmd *cmd = NULL;
	struct se_task *task = NULL;
	unsigned long flags;

check_depth:
	spin_lock_irq(&dev->execute_task_lock);
	if (new_cmd != NULL)
		__transport_add_tasks_from_cmd(new_cmd);

	if (list_empty(&dev->execute_task_list)) {
		spin_unlock_irq(&dev->execute_task_lock);
		return 0;
	}
	task = list_first_entry(&dev->execute_task_list,
				struct se_task, t_execute_list);
	__transport_remove_task_from_execute_queue(task, dev);
	spin_unlock_irq(&dev->execute_task_lock);

	cmd = task->task_se_cmd;
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	task->task_flags |= (TF_ACTIVE | TF_SENT);
	atomic_inc(&cmd->t_task_cdbs_sent);

	if (atomic_read(&cmd->t_task_cdbs_sent) ==
	    cmd->t_task_list_num)
		cmd->transport_state |= CMD_T_SENT;

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (cmd->execute_task)
		error = cmd->execute_task(task);
	else
		error = dev->transport->do_task(task);
	if (error != 0) {
		spin_lock_irqsave(&cmd->t_state_lock, flags);
		task->task_flags &= ~TF_ACTIVE;
		cmd->transport_state &= ~CMD_T_SENT;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		transport_stop_tasks_for_cmd(cmd);
		transport_generic_request_failure(cmd);
	}

	new_cmd = NULL;
	goto check_depth;

	return 0;
}

static inline u32 transport_get_sectors_6(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 8-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * Use 24-bit allocation length for TYPE_TAPE.
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];

	/*
	 * Everything else assume TYPE_DISK Sector CDB location.
	 * Use 8-bit sector value.  SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
type_disk:
	return cdb[4] ? : 256;
}

static inline u32 transport_get_sectors_10(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 16-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * XXX_10 is not defined in SSC, throw an exception
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
		*ret = -EINVAL;
		return 0;
	}

	/*
	 * Everything else assume TYPE_DISK Sector CDB location.
	 * Use 16-bit sector value.
	 */
type_disk:
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * XXX_12 is not defined in SSC, throw an exception
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
		*ret = -EINVAL;
		return 0;
	}

	/*
	 * Everything else assume TYPE_DISK Sector CDB location.
	 * Use 32-bit sector value.
	 */
type_disk:
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * Use 24-bit allocation length for TYPE_TAPE.
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];

type_disk:
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
		    (cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
		    (cdb[30] << 8) + cdb[31];

}

static inline u32 transport_get_size(
	u32 sectors,
	unsigned char *cdb,
	struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
		if (cdb[1] & 1) { /* sectors */
			return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
		} else /* bytes */
			return sectors;
	}

	pr_debug("Returning block_size: %u, sectors: %u == %u for"
		" %s object\n", dev->se_sub_dev->se_dev_attrib.block_size,
		sectors, dev->se_sub_dev->se_dev_attrib.block_size * sectors,
		dev->transport->name);

	return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
}
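
/*
 * Worked example: for a TYPE_DISK backend with a 512-byte block_size,
 * transport_get_size(8, cdb, cmd) returns 512 * 8 == 4096 bytes.  For
 * TYPE_TAPE the CDB FIXED bit (cdb[1] & 1) selects whether the transfer
 * length counts fixed-size blocks or raw bytes.
 */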

static void transport_xor_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	int i;
	int count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg
	 */

	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr)
			goto out;

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
}
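
/*
 * Worked example of the XOR merge above (illustrative): if a data-out
 * byte holds 0xF0 and the corresponding block previously read into the
 * BIDI scatterlist holds 0x0F, the in-place XOR leaves 0xF0 ^ 0x0F == 0xFF
 * in the BIDI (data-in) memory, which is then returned to the initiator.
 */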

/*
 * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
 */
static int transport_get_sense_data(struct se_cmd *cmd)
{
	unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
	struct se_device *dev = cmd->se_dev;
	struct se_task *task = NULL, *task_tmp;
	unsigned long flags;
	u32 offset = 0;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}

	list_for_each_entry_safe(task, task_tmp,
				&cmd->t_task_list, t_list) {
		if (!(task->task_flags & TF_HAS_SENSE))
			continue;

		if (!dev->transport->get_sense_buffer) {
			pr_err("dev->transport->get_sense_buffer"
					" is NULL\n");
			continue;
		}

		sense_buffer = dev->transport->get_sense_buffer(task);
		if (!sense_buffer) {
			pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate"
				" sense buffer for task with sense\n",
				cmd->se_tfo->get_task_tag(cmd), task);
			continue;
		}
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		offset = cmd->se_tfo->set_fabric_sense_len(cmd,
				TRANSPORT_SENSE_BUFFER);

		memcpy(&buffer[offset], sense_buffer,
				TRANSPORT_SENSE_BUFFER);
		cmd->scsi_status = task->task_scsi_status;
		/* Automatically padded */
		cmd->scsi_sense_length =
				(TRANSPORT_SENSE_BUFFER + offset);

		pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
				" and sense\n",
			dev->se_hba->hba_id, dev->transport->name,
				cmd->scsi_status);
		return 0;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return -1;
}

static inline long long transport_dev_end_lba(struct se_device *dev)
{
	return dev->transport->get_blocks(dev) + 1;
}

static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	u32 sectors;

	if (dev->transport->get_device_type(dev) != TYPE_DISK)
		return 0;

	sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);

	if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
		pr_err("LBA: %llu Sectors: %u exceeds"
			" transport_dev_end_lba(): %llu\n",
			cmd->t_task_lba, sectors,
			transport_dev_end_lba(dev));
		return -EINVAL;
	}

	return 0;
}

static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
{
	/*
	 * Determine if the received WRITE_SAME is used for direct
	 * passthrough into Linux/SCSI with struct request via TCM/pSCSI
	 * or we are signaling the use of internal WRITE_SAME + UNMAP=1
	 * emulation for -> Linux/BLOCK discard with TCM/IBLOCK code.
	 */
	int passthrough = (dev->transport->transport_type ==
				TRANSPORT_PLUGIN_PHBA_PDEV);

	if (!passthrough) {
		if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
			pr_err("WRITE_SAME PBDATA and LBDATA"
				" bits not supported for Block Discard"
				" Emulation\n");
			return -ENOSYS;
		}
		/*
		 * Currently for the emulated case we only accept
		 * tpws with the UNMAP=1 bit set.
		 */
		if (!(flags[0] & 0x08)) {
			pr_err("WRITE_SAME w/o UNMAP bit not"
				" supported for Block Discard Emulation\n");
			return -ENOSYS;
		}
	}

	return 0;
}

/*	transport_generic_cmd_sequencer():
 *
 *	Generic Command Sequencer that should work for most DAS transport
 *	drivers.
 *
 *	Called from target_setup_cmd_from_cdb() in the $FABRIC_MOD
 *	RX Thread.
 *
 *	FIXME: Need to support other SCSI OPCODEs as well.
 */
static int transport_generic_cmd_sequencer(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	struct se_device *dev = cmd->se_dev;
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	int ret = 0, sector_ret = 0, passthrough;
	u32 sectors = 0, size = 0, pr_reg_type = 0;
	u16 service_action;
	u8 alua_ascq = 0;
	/*
	 * Check for an existing UNIT ATTENTION condition
	 */
	if (core_scsi3_ua_check(cmd, cdb) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
		return -EINVAL;
	}
	/*
	 * Check status of Asymmetric Logical Unit Assignment port
	 */
	ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
	if (ret != 0) {
		/*
		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
		 * The ALUA additional sense code qualifier (ASCQ) is determined
		 * by the ALUA primary or secondary access state.
		 */
		if (ret > 0) {
			pr_debug("[%s]: ALUA TG Port not available,"
				" SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
				cmd->se_tfo->get_fabric_name(), alua_ascq);

			transport_set_sense_codes(cmd, 0x04, alua_ascq);
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
			return -EINVAL;
		}
		goto out_invalid_cdb_field;
	}
	/*
	 * Check status for SPC-3 Persistent Reservations
	 */
	if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
		if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
					cmd, cdb, pr_reg_type) != 0) {
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
			cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
			cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
			return -EBUSY;
		}
		/*
		 * This means the CDB is allowed for the SCSI Initiator port
		 * when said port is *NOT* holding the legacy SPC-2 or
		 * SPC-3 Persistent Reservation.
		 */
	}

	/*
	 * If we operate in passthrough mode we skip most CDB emulation and
	 * instead hand the commands down to the physical SCSI device.
	 */
	passthrough =
		(dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_64(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_10:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_64(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case XDWRITEREAD_10:
		if ((cmd->data_direction != DMA_TO_DEVICE) ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			goto out_invalid_cdb_field;
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;

		/*
		 * Do not allow BIDI commands for passthrough mode.
		 */
		if (passthrough)
			goto out_unsupported_cdb;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->transport_complete_callback = &transport_xor_callback;
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		break;
	case VARIABLE_LENGTH_CMD:
		service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
			if (sector_ret)
				goto out_unsupported_cdb;
			size = transport_get_size(sectors, cdb, cmd);
			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;

			/*
			 * Do not allow BIDI commands for passthrough mode.
			 */
			if (passthrough)
				goto out_unsupported_cdb;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->transport_complete_callback = &transport_xor_callback;
			if (cdb[1] & 0x8)
				cmd->se_cmd_flags |= SCF_FUA;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
			if (sector_ret)
				goto out_unsupported_cdb;

			if (sectors)
				size = transport_get_size(1, cdb, cmd);
			else {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				goto out_invalid_cdb_field;
			}

			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
			cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;

			if (target_check_write_same_discard(&cdb[10], dev) < 0)
				goto out_unsupported_cdb;
			if (!passthrough)
				cmd->execute_task = target_emulate_write_same;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
				" 0x%04x not supported\n", service_action);
			goto out_unsupported_cdb;
		}
		break;
	case MAINTENANCE_IN:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/* MAINTENANCE_IN from SCC-2 */
			/*
			 * Check for emulated MI_REPORT_TARGET_PGS.
			 */
			if (cdb[1] == MI_REPORT_TARGET_PGS &&
			    su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
				cmd->execute_task =
					target_emulate_report_target_port_groups;
			}
			size = (cdb[6] << 24) | (cdb[7] << 16) |
			       (cdb[8] << 8) | cdb[9];
		} else {
			/* GPCMD_SEND_KEY from multi media commands */
			size = (cdb[8] << 8) + cdb[9];
		}
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MODE_SELECT:
		size = cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MODE_SELECT_10:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MODE_SENSE:
		size = cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_modesense;
		break;
	case MODE_SENSE_10:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_modesense;
		break;
	case GPCMD_READ_BUFFER_CAPACITY:
	case GPCMD_SEND_OPC:
	case LOG_SELECT:
	case LOG_SENSE:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case READ_BLOCK_LIMITS:
		size = READ_BLOCK_LEN;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case GPCMD_GET_CONFIGURATION:
	case GPCMD_READ_FORMAT_CAPACITIES:
	case GPCMD_READ_DISC_INFO:
	case GPCMD_READ_TRACK_RZONE_INFO:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case PERSISTENT_RESERVE_IN:
		if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
			cmd->execute_task = target_scsi3_emulate_pr_in;
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case PERSISTENT_RESERVE_OUT:
		if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
			cmd->execute_task = target_scsi3_emulate_pr_out;
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case GPCMD_MECHANISM_STATUS:
	case GPCMD_READ_DVD_STRUCTURE:
		size = (cdb[8] << 8) + cdb[9];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case READ_POSITION:
		size = READ_POSITION_LEN;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MAINTENANCE_OUT:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/* MAINTENANCE_OUT from SCC-2
			 *
			 * Check for emulated MO_SET_TARGET_PGS.
			 */
			if (cdb[1] == MO_SET_TARGET_PGS &&
			    su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
				cmd->execute_task =
					target_emulate_set_target_port_groups;
			}

			size = (cdb[6] << 24) | (cdb[7] << 16) |
			       (cdb[8] << 8) | cdb[9];
		} else {
			/* GPCMD_REPORT_KEY from multi media commands */
			size = (cdb[8] << 8) + cdb[9];
		}
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case INQUIRY:
		size = (cdb[3] << 8) + cdb[4];
		/*
		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
		 * See spc4r17 section 5.3
		 */
		if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
			cmd->sam_task_attr = MSG_HEAD_TAG;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_inquiry;
		break;
	case READ_BUFFER:
		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_readcapacity;
		break;
	case READ_MEDIA_SERIAL_NUMBER:
	case SECURITY_PROTOCOL_IN:
	case SECURITY_PROTOCOL_OUT:
		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case SERVICE_ACTION_IN:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			if (!passthrough)
				cmd->execute_task =
					target_emulate_readcapacity_16;
			break;
		default:
			if (passthrough)
				break;

			pr_err("Unsupported SA: 0x%02x\n",
				cmd->t_task_cdb[1] & 0x1f);
			goto out_invalid_cdb_field;
		}
		/*FALLTHROUGH*/
	case ACCESS_CONTROL_IN:
	case ACCESS_CONTROL_OUT:
	case EXTENDED_COPY:
	case READ_ATTRIBUTE:
	case RECEIVE_COPY_RESULTS:
	case WRITE_ATTRIBUTE:
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case RECEIVE_DIAGNOSTIC:
	case SEND_DIAGNOSTIC:
		size = (cdb[3] << 8) | cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
#if 0
	case GPCMD_READ_CD:
		sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		size = (2336 * sectors);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
#endif
	case READ_TOC:
		size = cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case REQUEST_SENSE:
		size = cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_request_sense;
		break;
	case READ_ELEMENT_STATUS:
		size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case WRITE_BUFFER:
		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case RESERVE:
	case RESERVE_10:
		/*
		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
		 */
		if (cdb[0] == RESERVE_10)
			size = (cdb[7] << 8) | cdb[8];
		else
			size = cmd->data_length;

		/*
		 * Setup the legacy emulated handler for SPC-2 and
		 * >= SPC-3 compatible reservation handling (CRH=1)
		 * Otherwise, we assume the underlying SCSI logic is
		 * running in SPC_PASSTHROUGH, and wants reservations
		 * emulation disabled.
		 */
		if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
			cmd->execute_task = target_scsi2_reservation_reserve;
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case RELEASE:
	case RELEASE_10:
		/*
		 * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
		 */
		if (cdb[0] == RELEASE_10)
			size = (cdb[7] << 8) | cdb[8];
		else
			size = cmd->data_length;

		if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
			cmd->execute_task = target_scsi2_reservation_release;
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		/*
		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
		 */
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
			cmd->t_task_lba = transport_lba_64(cdb);
		}
		if (sector_ret)
			goto out_unsupported_cdb;

		size = transport_get_size(sectors, cdb, cmd);
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;

		if (passthrough)
			break;

		/*
		 * Check to ensure that LBA + Range does not exceed past end of
		 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
		 */
		if ((cmd->t_task_lba != 0) || (sectors != 0)) {
			if (transport_cmd_get_valid_sectors(cmd) < 0)
				goto out_invalid_cdb_field;
		}
		cmd->execute_task = target_emulate_synchronize_cache;
		break;
	case UNMAP:
		size = get_unaligned_be16(&cdb[7]);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;

		if (sectors)
			size = transport_get_size(1, cdb, cmd);
		else {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			goto out_invalid_cdb_field;
		}

		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;

		if (target_check_write_same_discard(&cdb[1], dev) < 0)
			goto out_unsupported_cdb;
		if (!passthrough)
			cmd->execute_task = target_emulate_write_same;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;

		if (sectors)
			size = transport_get_size(1, cdb, cmd);
		else {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			goto out_invalid_cdb_field;
		}

		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of original reserved field
		 */
		if (target_check_write_same_discard(&cdb[1], dev) < 0)
			goto out_unsupported_cdb;
		if (!passthrough)
			cmd->execute_task = target_emulate_write_same;
		break;
	case ALLOW_MEDIUM_REMOVAL:
	case ERASE:
	case REZERO_UNIT:
	case SEEK_10:
	case SPACE:
	case START_STOP:
	case TEST_UNIT_READY:
	case VERIFY:
	case WRITE_FILEMARKS:
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_noop;
		break;
	case GPCMD_CLOSE_TRACK:
	case INITIALIZE_ELEMENT_STATUS:
	case GPCMD_LOAD_UNLOAD:
	case GPCMD_SET_SPEED:
	case MOVE_MEDIUM:
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case REPORT_LUNS:
		cmd->execute_task = target_report_luns;
		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		/*
		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
		 * See spc4r17 section 5.3
		 */
		if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
			cmd->sam_task_attr = MSG_HEAD_TAG;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	default:
		pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
			" 0x%02x, sending CHECK_CONDITION.\n",
			cmd->se_tfo->get_fabric_name(), cdb[0]);
		goto out_unsupported_cdb;
	}

	if (cmd->unknown_data_length)
		cmd->data_length = size;

	if (size != cmd->data_length) {
		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
				cmd->data_length, size, cdb[0]);

		cmd->cmd_spdtl = size;

		if (cmd->data_direction == DMA_TO_DEVICE) {
			pr_err("Rejecting underflow/overflow"
					" WRITE data\n");
			goto out_invalid_cdb_field;
		}
		/*
		 * Reject READ_* or WRITE_* with overflow/underflow for
		 * type SCF_SCSI_DATA_SG_IO_CDB.
		 */
		if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512))  {
			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
				" CDB on non 512-byte sector setup subsystem"
				" plugin: %s\n", dev->transport->name);
			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
			goto out_invalid_cdb_field;
		}

		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
		}
		cmd->data_length = size;
	}

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB &&
	    (sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors ||
	     sectors > dev->se_sub_dev->se_dev_attrib.max_sectors)) {
		printk_ratelimited(KERN_ERR "SCSI OP %02xh with too big sectors %u\n",
				   cdb[0], sectors);
		goto out_invalid_cdb_field;
	}

	/* reject any command that we don't have a handler for */
	if (!(passthrough || cmd->execute_task ||
	     (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
		goto out_unsupported_cdb;

	transport_set_supported_SAM_opcode(cmd);
	return ret;

out_unsupported_cdb:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
	return -EINVAL;
out_invalid_cdb_field:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
	return -EINVAL;
}
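
/*
 * Residual handling example for the sequencer above: if the CDB-derived
 * size is 8192 bytes but the fabric expected only 4096 (cmd->data_length),
 * SCF_OVERFLOW_BIT is set with residual_count == 4096 and data_length is
 * reset to the CDB-derived size; the reverse case sets SCF_UNDERFLOW_BIT.
 */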

/*
 * Called from I/O completion to determine which dormant/delayed
 * and ordered cmds need to have their tasks added to the execution queue.
 */
static void transport_complete_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_cmd *cmd_p, *cmd_tmp;
	int new_active_tasks = 0;

	if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
		atomic_dec(&dev->simple_cmds);
		smp_mb__after_atomic_dec();
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
			cmd->se_ordered_id);
	} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for"
			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
			cmd->se_ordered_id);
	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
		atomic_dec(&dev->dev_ordered_sync);
		smp_mb__after_atomic_dec();

		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
			" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
	}
	/*
	 * Process all commands up to the last received
	 * ORDERED task attribute which requires another blocking
	 * boundary
	 */
	spin_lock(&dev->delayed_cmd_lock);
	list_for_each_entry_safe(cmd_p, cmd_tmp,
			&dev->delayed_cmd_list, se_delayed_node) {

		list_del(&cmd_p->se_delayed_node);
		spin_unlock(&dev->delayed_cmd_lock);

		pr_debug("Calling add_tasks() for"
			" cmd_p: 0x%02x Task Attr: 0x%02x"
			" Dormant -> Active, se_ordered_id: %u\n",
			cmd_p->t_task_cdb[0],
			cmd_p->sam_task_attr, cmd_p->se_ordered_id);

		transport_add_tasks_from_cmd(cmd_p);
		new_active_tasks++;

		spin_lock(&dev->delayed_cmd_lock);
		if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
			break;
	}
	spin_unlock(&dev->delayed_cmd_lock);
	/*
	 * If new tasks have become active, wake up the transport thread
	 * to do the processing of the Active tasks.
	 */
	if (new_active_tasks != 0)
		wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
}

static void transport_complete_qf(struct se_cmd *cmd)
{
	int ret = 0;

	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);

	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret)
			goto out;
	}

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		ret = cmd->se_tfo->queue_data_in(cmd);
		break;
	case DMA_TO_DEVICE:
		if (cmd->t_bidi_data_sg) {
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret < 0)
				break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		ret = cmd->se_tfo->queue_status(cmd);
		break;
	default:
		break;
	}

out:
	if (ret < 0) {
		transport_handle_queue_full(cmd, cmd->se_dev);
		return;
	}
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
}

static void transport_handle_queue_full(
	struct se_cmd *cmd,
	struct se_device *dev)
{
	spin_lock_irq(&dev->qf_cmd_lock);
	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
	atomic_inc(&dev->dev_qf_count);
	smp_mb__after_atomic_inc();
	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);

	schedule_work(&cmd->se_dev->qf_work_queue);
}

static void target_complete_ok_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	int reason = 0, ret;

	/*
	 * Check if we need to move delayed/dormant tasks from cmds on the
	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
	 * Attribute.
	 */
	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);
	/*
	 * Check to schedule QUEUE_FULL work, or execute an existing
	 * cmd->transport_qf_callback()
	 */
	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
		schedule_work(&cmd->se_dev->qf_work_queue);

	/*
	 * Check if we need to retrieve a sense buffer from
	 * the struct se_cmd in question.
	 */
	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		if (transport_get_sense_data(cmd) < 0)
			reason = TCM_NON_EXISTENT_LUN;

		/*
		 * Only set when a struct se_task->task_scsi_status returned
		 * a non GOOD status.
		 */
		if (cmd->scsi_status) {
			ret = transport_send_check_condition_and_sense(
					cmd, reason, 1);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop_to_fabric(cmd);
			return;
		}
	}
	/*
	 * Check for a callback, used by, amongst other things,
	 * XDWRITE_READ_10 emulation.
	 */
	if (cmd->transport_complete_callback)
		cmd->transport_complete_callback(cmd);

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		spin_lock(&cmd->se_lun->lun_sep_lock);
		if (cmd->se_lun->lun_sep) {
			cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
					cmd->data_length;
		}
		spin_unlock(&cmd->se_lun->lun_sep_lock);

		ret = cmd->se_tfo->queue_data_in(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	case DMA_TO_DEVICE:
		spin_lock(&cmd->se_lun->lun_sep_lock);
		if (cmd->se_lun->lun_sep) {
			cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
				cmd->data_length;
		}
		spin_unlock(&cmd->se_lun->lun_sep_lock);
		/*
		 * Check if we need to send READ payload for BIDI-COMMAND
		 */
		if (cmd->t_bidi_data_sg) {
			spin_lock(&cmd->se_lun->lun_sep_lock);
			if (cmd->se_lun->lun_sep) {
				cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
					cmd->data_length;
			}
			spin_unlock(&cmd->se_lun->lun_sep_lock);
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;
			break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	default:
		break;
	}

	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
		" data_direction: %d\n", cmd, cmd->data_direction);
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}

static void transport_free_dev_tasks(struct se_cmd *cmd)
{
	struct se_task *task, *task_tmp;
	unsigned long flags;
	LIST_HEAD(dispose_list);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_for_each_entry_safe(task, task_tmp,
				&cmd->t_task_list, t_list) {
		if (!(task->task_flags & TF_ACTIVE))
			list_move_tail(&task->t_list, &dispose_list);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	while (!list_empty(&dispose_list)) {
		task = list_first_entry(&dispose_list, struct se_task, t_list);

		list_del(&task->t_list);
		cmd->se_dev->transport->free_task(task);
	}
}

static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, nents, count)
		__free_page(sg_page(sg));

	kfree(sgl);
}

static inline void transport_free_pages(struct se_cmd *cmd)
{
	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
		return;

	transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
	cmd->t_data_sg = NULL;
	cmd->t_data_nents = 0;

	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
	cmd->t_bidi_data_sg = NULL;
	cmd->t_bidi_data_nents = 0;
}

C
Christoph Hellwig 已提交
3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489
/**
 * transport_release_cmd - free a command
 * @cmd:       command to free
 *
 * This routine unconditionally frees a command, and reference counting
 * or list removal must be done in the caller.
 */
static void transport_release_cmd(struct se_cmd *cmd)
{
	BUG_ON(!cmd->se_tfo);

	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
		core_tmr_release_req(cmd->se_tmr_req);
	if (cmd->t_task_cdb != cmd->__t_task_cdb)
		kfree(cmd->t_task_cdb);
	/*
	 * If this cmd has been setup with target_get_sess_cmd(), drop
	 * the kref and call ->release_cmd() in kref callback.
	 */
	if (cmd->check_release != 0) {
		target_put_sess_cmd(cmd->se_sess, cmd);
		return;
	}
	cmd->se_tfo->release_cmd(cmd);
}

/**
 * transport_put_cmd - release a reference to a command
 * @cmd:       command to release
 *
 * This routine releases our reference to the command and frees it if possible.
 */
static void transport_put_cmd(struct se_cmd *cmd)
{
	unsigned long flags;
	int free_tasks = 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (atomic_read(&cmd->t_fe_count)) {
		if (!atomic_dec_and_test(&cmd->t_fe_count))
			goto out_busy;
	}

	if (atomic_read(&cmd->t_se_count)) {
		if (!atomic_dec_and_test(&cmd->t_se_count))
			goto out_busy;
	}

	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		transport_all_task_dev_remove_state(cmd);
		free_tasks = 1;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (free_tasks != 0)
		transport_free_dev_tasks(cmd);

	transport_free_pages(cmd);
	transport_release_cmd(cmd);
	return;
out_busy:
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

/*
 * transport_generic_map_mem_to_cmd - Use fabric-allocated pages instead of
 * allocating in the core.
 * @cmd:  Associated se_cmd descriptor
 * @sgl:  SGL style memory for TCM WRITE / READ
 * @sgl_count: Number of SGL elements
 * @sgl_bidi: SGL style memory for TCM BIDI READ
 * @sgl_bidi_count: Number of BIDI READ SGL elements
 *
 * Return: nonzero if cmd was rejected for -ENOMEM or improper usage
 * of parameters.
 */
int transport_generic_map_mem_to_cmd(
	struct se_cmd *cmd,
	struct scatterlist *sgl,
	u32 sgl_count,
	struct scatterlist *sgl_bidi,
	u32 sgl_bidi_count)
{
	if (!sgl || !sgl_count)
		return 0;

	if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
	    (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
		/*
		 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
		 * scatterlists already have been set to follow what the fabric
		 * passes for the original expected data transfer length.
		 */
		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
			pr_warn("Rejecting SCSI DATA overflow for fabric using"
				" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
			return -EINVAL;
		}

		cmd->t_data_sg = sgl;
		cmd->t_data_nents = sgl_count;

		if (sgl_bidi && sgl_bidi_count) {
			cmd->t_bidi_data_sg = sgl_bidi;
			cmd->t_bidi_data_nents = sgl_bidi_count;
		}
		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	}

	return 0;
}
EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
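
/*
 * Usage sketch (illustrative only; "my_cmd", "sg" and "sg_count" are
 * hypothetical fabric-side names): a fabric module that already owns a
 * scatterlist for the I/O hands it to the core before dispatching the
 * command, so transport_generic_new_cmd() skips its own allocation:
 *
 *	if (transport_generic_map_mem_to_cmd(&my_cmd->se_cmd, sg, sg_count,
 *					     NULL, 0) < 0)
 *		return -EINVAL;
 */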

void *transport_kmap_data_sg(struct se_cmd *cmd)
{
	struct scatterlist *sg = cmd->t_data_sg;
	struct page **pages;
	int i;

	BUG_ON(!sg);
	/*
	 * We need to take into account a possible offset here for fabrics like
	 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
	 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
	 */
	if (!cmd->t_data_nents)
		return NULL;
	else if (cmd->t_data_nents == 1)
		return kmap(sg_page(sg)) + sg->offset;

	/* >1 page. use vmap */
	pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
	if (!pages)
		return NULL;

	/* convert sg[] to pages[] */
	for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
		pages[i] = sg_page(sg);
	}

	cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
	kfree(pages);
	if (!cmd->t_data_vmap)
		return NULL;

	return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
}
EXPORT_SYMBOL(transport_kmap_data_sg);

void transport_kunmap_data_sg(struct se_cmd *cmd)
{
	if (!cmd->t_data_nents) {
		return;
	} else if (cmd->t_data_nents == 1) {
		kunmap(sg_page(cmd->t_data_sg));
		return;
	}

	vunmap(cmd->t_data_vmap);
	cmd->t_data_vmap = NULL;
}
EXPORT_SYMBOL(transport_kunmap_data_sg);
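
/*
 * Usage sketch (illustrative only): code that needs a linear view of the
 * command payload, e.g. to parse a parameter list, brackets the access
 * with the kmap/kunmap pair above:
 *
 *	unsigned char *buf = transport_kmap_data_sg(cmd);
 *	if (buf) {
 *		... read or fill buf[0 .. cmd->data_length - 1] ...
 *		transport_kunmap_data_sg(cmd);
 *	}
 */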

static int
transport_generic_get_mem(struct se_cmd *cmd)
{
	u32 length = cmd->data_length;
	unsigned int nents;
	struct page *page;
	gfp_t zero_flag;
	int i = 0;

	nents = DIV_ROUND_UP(length, PAGE_SIZE);
	cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
	if (!cmd->t_data_sg)
		return -ENOMEM;

	cmd->t_data_nents = nents;
	sg_init_table(cmd->t_data_sg, nents);

	zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB ? 0 : __GFP_ZERO;

	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);
		page = alloc_page(GFP_KERNEL | zero_flag);
		if (!page)
			goto out;

		sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}
	return 0;

out:
	/*
	 * Only free the pages that were actually allocated; entry i was
	 * never set when alloc_page() failed.
	 */
	while (i > 0) {
		i--;
		__free_page(sg_page(&cmd->t_data_sg[i]));
	}
	kfree(cmd->t_data_sg);
	cmd->t_data_sg = NULL;
	return -ENOMEM;
}

/* Reduce sectors if they are too long for the device */
static inline sector_t transport_limit_task_sectors(
	struct se_device *dev,
	unsigned long long lba,
	sector_t sectors)
{
	sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);

	if (dev->transport->get_device_type(dev) == TYPE_DISK)
		if ((lba + sectors) > transport_dev_end_lba(dev))
			sectors = ((transport_dev_end_lba(dev) - lba) + 1);

	return sectors;
}

/*
 * Break up cmd into chunks transport can handle
 */
static int
transport_allocate_data_tasks(struct se_cmd *cmd,
	enum dma_data_direction data_direction,
	struct scatterlist *cmd_sg, unsigned int sgl_nents)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dev_attrib *attr = &dev->se_sub_dev->se_dev_attrib;
	sector_t sectors;
	struct se_task *task;
	unsigned long flags;

	if (transport_cmd_get_valid_sectors(cmd) < 0)
		return -EINVAL;

	sectors = DIV_ROUND_UP(cmd->data_length, attr->block_size);

	BUG_ON(cmd->data_length % attr->block_size);
	BUG_ON(sectors > attr->max_sectors);

	task = transport_generic_get_task(cmd, data_direction);
	if (!task)
		return -ENOMEM;

	task->task_sg = cmd_sg;
	task->task_sg_nents = sgl_nents;
	task->task_size = cmd->data_length;

	task->task_lba = cmd->t_task_lba;
	task->task_sectors = sectors;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_add_tail(&task->t_list, &cmd->t_task_list);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return 1;
}

static int
transport_allocate_control_task(struct se_cmd *cmd)
{
	struct se_task *task;
	unsigned long flags;

	/* Workaround for handling zero-length control CDBs */
	if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
	    !cmd->data_length)
		return 0;

	task = transport_generic_get_task(cmd, cmd->data_direction);
	if (!task)
		return -ENOMEM;

	task->task_sg = cmd->t_data_sg;
	task->task_size = cmd->data_length;
	task->task_sg_nents = cmd->t_data_nents;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_add_tail(&task->t_list, &cmd->t_task_list);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/* Success! Return number of tasks allocated */
	return 1;
}

/*
 * Allocate any resources required to execute the command, and place it
 * on the execution queue if possible.  For writes we might not have the
 * payload yet, so notify the fabric via a call to ->write_pending instead.
 */
int transport_generic_new_cmd(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	int task_cdbs, task_cdbs_bidi = 0;
	int set_counts = 1;
	int ret = 0;

	/*
	 * Determine if the TCM fabric module has already allocated physical
	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
	 * beforehand.
	 */
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
	    cmd->data_length) {
		ret = transport_generic_get_mem(cmd);
		if (ret < 0)
			goto out_fail;
	}

	/*
	 * For BIDI command set up the read tasks first.
	 */
	if (cmd->t_bidi_data_sg &&
	    dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB));

		task_cdbs_bidi = transport_allocate_data_tasks(cmd,
				DMA_FROM_DEVICE, cmd->t_bidi_data_sg,
				cmd->t_bidi_data_nents);
		if (task_cdbs_bidi <= 0)
			goto out_fail;

		atomic_inc(&cmd->t_fe_count);
		atomic_inc(&cmd->t_se_count);
		set_counts = 0;
	}

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
		task_cdbs = transport_allocate_data_tasks(cmd,
					cmd->data_direction, cmd->t_data_sg,
					cmd->t_data_nents);
	} else {
		task_cdbs = transport_allocate_control_task(cmd);
	}

	if (task_cdbs < 0)
		goto out_fail;
	else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
		spin_lock_irq(&cmd->t_state_lock);
		cmd->t_state = TRANSPORT_COMPLETE;
		cmd->transport_state |= CMD_T_ACTIVE;
		spin_unlock_irq(&cmd->t_state_lock);

		if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
			u8 ua_asc = 0, ua_ascq = 0;

			core_scsi3_ua_clear_for_request_sense(cmd,
					&ua_asc, &ua_ascq);
		}

		INIT_WORK(&cmd->work, target_complete_ok_work);
		queue_work(target_completion_wq, &cmd->work);
		return 0;
	}

	if (set_counts) {
		atomic_inc(&cmd->t_fe_count);
		atomic_inc(&cmd->t_se_count);
	}

	cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi);
	atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num);
	atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num);

	/*
	 * For WRITEs, let the fabric know its buffer is ready.
	 * This WRITE struct se_cmd (and all of its associated struct se_task's)
	 * will be added to the struct se_device execution queue after its WRITE
	 * data has arrived. (ie: It gets handled by the transport processing
	 * thread a second time)
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		transport_add_tasks_to_state_queue(cmd);
		return transport_generic_write_pending(cmd);
	}
	/*
	 * Everything else but a WRITE, add the struct se_cmd's struct se_task's
	 * to the execution queue.
	 */
	transport_execute_tasks(cmd);
	return 0;

out_fail:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	return -EINVAL;
}
EXPORT_SYMBOL(transport_generic_new_cmd);
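
/*
 * Usage sketch (illustrative only): a fabric submission path typically
 * ends with transport_generic_new_cmd(); on failure the command is
 * turned into an exception response, mirroring the handling done by the
 * processing thread below:
 *
 *	ret = transport_generic_new_cmd(se_cmd);
 *	if (ret < 0)
 *		transport_generic_request_failure(se_cmd);
 */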

/*	transport_generic_process_write():
 *
 *	Called once WRITE data has arrived to add the command's tasks to
 *	the execution queue.
 */
void transport_generic_process_write(struct se_cmd *cmd)
{
	transport_execute_tasks(cmd);
}
EXPORT_SYMBOL(transport_generic_process_write);

static void transport_write_pending_qf(struct se_cmd *cmd)
{
	int ret;

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM) {
		pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
			 cmd);
		transport_handle_queue_full(cmd, cmd->se_dev);
	}
}

static int transport_generic_write_pending(struct se_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->t_state = TRANSPORT_WRITE_PENDING;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * Clear the se_cmd for WRITE_PENDING status in order to set
	 * CMD_T_ACTIVE so that transport_generic_handle_data can be called
	 * from HW target mode interrupt code.  This is safe to be called
	 * with transport_off=1 before the cmd->se_tfo->write_pending
	 * because the se_cmd->se_lun pointer is not being cleared.
	 */
	transport_cmd_check_stop(cmd, 1, 0);

	/*
	 * Call the fabric write_pending function here to let the
	 * frontend know that WRITE buffers are ready.
	 */
	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;
	else if (ret < 0)
		return ret;

	return 1;

queue_full:
	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
	transport_handle_queue_full(cmd, cmd->se_dev);
	return 0;
}

void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
			transport_wait_for_tasks(cmd);

		transport_release_cmd(cmd);
	} else {
		if (wait_for_tasks)
			transport_wait_for_tasks(cmd);

		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);

		if (cmd->se_lun)
			transport_lun_remove_cmd(cmd);

		transport_free_dev_tasks(cmd);

		transport_put_cmd(cmd);
	}
}
EXPORT_SYMBOL(transport_generic_free_cmd);
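
/*
 * Usage sketch (illustrative only): a fabric's response-sent completion
 * path normally releases the descriptor with wait_for_tasks = 0; a
 * teardown path that may still have tasks in flight passes 1 so
 * transport_wait_for_tasks() is invoked first:
 *
 *	transport_generic_free_cmd(se_cmd, 0);
 */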

/* target_get_sess_cmd - Add command to active ->sess_cmd_list
 * @se_sess:	session to reference
 * @se_cmd:	command descriptor to add
 * @ack_kref:	Signal that fabric will perform an ack target_put_sess_cmd()
 */
void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
			bool ack_kref)
{
	unsigned long flags;

	kref_init(&se_cmd->cmd_kref);
	/*
	 * Add a second kref if the fabric caller is expecting to handle
	 * fabric acknowledgement that requires two target_put_sess_cmd()
	 * invocations before se_cmd descriptor release.
	 */
	if (ack_kref) {
		kref_get(&se_cmd->cmd_kref);
		se_cmd->se_cmd_flags |= SCF_ACK_KREF;
	}

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
	se_cmd->check_release = 1;
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_get_sess_cmd);

static void target_release_cmd_kref(struct kref *kref)
{
	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
	struct se_session *se_sess = se_cmd->se_sess;
	unsigned long flags;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	if (list_empty(&se_cmd->se_cmd_list)) {
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		se_cmd->se_tfo->release_cmd(se_cmd);
		return;
	}
	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		complete(&se_cmd->cmd_wait_comp);
		return;
	}
	list_del(&se_cmd->se_cmd_list);
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

	se_cmd->se_tfo->release_cmd(se_cmd);
}

/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
 * @se_sess:	session to reference
 * @se_cmd:	command descriptor to drop
 */
int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
{
	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
}
EXPORT_SYMBOL(target_put_sess_cmd);
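
/*
 * Usage sketch (illustrative only): a fabric that must keep the
 * descriptor alive until its hardware acknowledges the response takes
 * the extra kref at submission time, and each completion path then
 * drops one reference:
 *
 *	target_get_sess_cmd(se_sess, se_cmd, true);
 *	...
 *	target_put_sess_cmd(se_sess, se_cmd);
 */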

/* target_splice_sess_cmd_list - Split active cmds into sess_wait_list
 * @se_sess:	session to split
 */
void target_splice_sess_cmd_list(struct se_session *se_sess)
{
	struct se_cmd *se_cmd;
	unsigned long flags;

	WARN_ON(!list_empty(&se_sess->sess_wait_list));
	INIT_LIST_HEAD(&se_sess->sess_wait_list);

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	se_sess->sess_tearing_down = 1;

	list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);

	list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
		se_cmd->cmd_wait_set = 1;

	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_splice_sess_cmd_list);

/* target_wait_for_sess_cmds - Wait for outstanding descriptors
 * @se_sess:    session to wait for active I/O
 * @wait_for_tasks:	Make extra transport_wait_for_tasks call
 */
void target_wait_for_sess_cmds(
	struct se_session *se_sess,
	int wait_for_tasks)
{
	struct se_cmd *se_cmd, *tmp_cmd;
	bool rc = false;

	list_for_each_entry_safe(se_cmd, tmp_cmd,
				&se_sess->sess_wait_list, se_cmd_list) {
		list_del(&se_cmd->se_cmd_list);

		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
			" %d\n", se_cmd, se_cmd->t_state,
			se_cmd->se_tfo->get_cmd_state(se_cmd));

		if (wait_for_tasks) {
			pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));

			rc = transport_wait_for_tasks(se_cmd);

			pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));
		}

		if (!rc) {
			wait_for_completion(&se_cmd->cmd_wait_comp);
			pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));
		}

		se_cmd->se_tfo->release_cmd(se_cmd);
	}
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);
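
/*
 * Usage sketch (illustrative only): a fabric session shutdown path
 * splices the active command list first and then blocks until every
 * outstanding descriptor has completed and been released:
 *
 *	target_splice_sess_cmd_list(se_sess);
 *	target_wait_for_sess_cmds(se_sess, 0);
 */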

/*	transport_lun_wait_for_tasks():
 *
 *	Called from ConfigFS context to stop the passed struct se_cmd to allow
 *	a struct se_lun to be successfully shut down.
 */
static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
{
	unsigned long flags;
	int ret;
	/*
	 * If the frontend has already requested this struct se_cmd to
	 * be stopped, we can safely ignore this struct se_cmd.
	 */
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_STOP) {
		cmd->transport_state &= ~CMD_T_LUN_STOP;

		pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
			 cmd->se_tfo->get_task_tag(cmd));
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		transport_cmd_check_stop(cmd, 1, 0);
		return -EPERM;
	}
	cmd->transport_state |= CMD_T_LUN_FE_STOP;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);

	ret = transport_stop_tasks_for_cmd(cmd);

	pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
			" %d\n", cmd, cmd->t_task_list_num, ret);
	if (!ret) {
		pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
				cmd->se_tfo->get_task_tag(cmd));
		wait_for_completion(&cmd->transport_lun_stop_comp);
		pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
				cmd->se_tfo->get_task_tag(cmd));
	}
	transport_remove_cmd_from_queue(cmd);

	return 0;
}

static void __transport_clear_lun_from_sessions(struct se_lun *lun)
{
	struct se_cmd *cmd = NULL;
	unsigned long lun_flags, cmd_flags;
	/*
	 * Do exception processing and return CHECK_CONDITION status to the
	 * Initiator Port.
	 */
	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	while (!list_empty(&lun->lun_cmd_list)) {
		cmd = list_first_entry(&lun->lun_cmd_list,
		       struct se_cmd, se_lun_node);
		list_del_init(&cmd->se_lun_node);

		/*
		 * This will notify iscsi_target_transport.c:
		 * transport_cmd_check_stop() that a LUN shutdown is in
		 * progress for the iscsi_cmd_t.
		 */
		spin_lock(&cmd->t_state_lock);
		pr_debug("SE_LUN[%d] - Setting cmd->transport"
			"_lun_stop for ITT: 0x%08x\n",
			cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));
		cmd->transport_state |= CMD_T_LUN_STOP;
		spin_unlock(&cmd->t_state_lock);

		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);

		if (!cmd->se_lun) {
			pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
				cmd->se_tfo->get_task_tag(cmd),
				cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
			BUG();
		}
		/*
		 * If the Storage engine still owns the iscsi_cmd_t, determine
		 * and/or stop its context.
		 */
		pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
			"_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));

		if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}

		pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
			"_wait_for_tasks(): SUCCESS\n",
			cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));

		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
		if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
			spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
			goto check_cond;
		}
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		transport_all_task_dev_remove_state(cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);

		transport_free_dev_tasks(cmd);
		/*
		 * The Storage engine stopped this struct se_cmd before it was
		 * sent to the fabric frontend for delivery back to the
		 * Initiator Node.  Return this SCSI CDB back with a
		 * CHECK_CONDITION status.
		 */
check_cond:
		transport_send_check_condition_and_sense(cmd,
				TCM_NON_EXISTENT_LUN, 0);
		/*
		 * If the fabric frontend is waiting for this iscsi_cmd_t to
		 * be released, notify the waiting thread now that LU has
		 * finished accessing it.
		 */
		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
		if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
			pr_debug("SE_LUN[%d] - Detected FE stop for"
				" struct se_cmd: %p ITT: 0x%08x\n",
				lun->unpacked_lun,
				cmd, cmd->se_tfo->get_task_tag(cmd));

			spin_unlock_irqrestore(&cmd->t_state_lock,
					cmd_flags);
			transport_cmd_check_stop(cmd, 1, 0);
			complete(&cmd->transport_lun_fe_stop_comp);
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}
		pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
			lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));

		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	}
	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
}

static int transport_clear_lun_thread(void *p)
{
	struct se_lun *lun = p;

	__transport_clear_lun_from_sessions(lun);
	complete(&lun->lun_shutdown_comp);

	return 0;
}

int transport_clear_lun_from_sessions(struct se_lun *lun)
{
	struct task_struct *kt;

	kt = kthread_run(transport_clear_lun_thread, lun,
			"tcm_cl_%u", lun->unpacked_lun);
	if (IS_ERR(kt)) {
		pr_err("Unable to start clear_lun thread\n");
		return PTR_ERR(kt);
	}
	wait_for_completion(&lun->lun_shutdown_comp);

	return 0;
}

/**
 * transport_wait_for_tasks - wait for completion to occur
 * @cmd:	command to wait on
 *
 * Called from frontend fabric context to wait for storage engine
 * to pause and/or release frontend generated struct se_cmd.
 */
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}
	/*
	 * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
	 * has been set in transport_set_supported_SAM_opcode().
	 */
	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}
	/*
	 * If we are already stopped due to an external event (ie: LUN shutdown)
	 * sleep until the connection can have the passed struct se_cmd back.
	 * The cmd->transport_lun_fe_stop_comp will be completed by
	 * transport_clear_lun_from_sessions() once the ConfigFS context caller
	 * has completed its operation on the struct se_cmd.
	 */
	if (cmd->transport_state & CMD_T_LUN_STOP) {
		pr_debug("wait_for_tasks: Stopping"
			" wait_for_completion(&cmd->transport_lun_fe"
			"_stop_comp); for ITT: 0x%08x\n",
			cmd->se_tfo->get_task_tag(cmd));
		/*
		 * There is a special case for WRITES where a FE exception +
		 * LUN shutdown means ConfigFS context is still sleeping on
		 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
		 * We go ahead and up transport_lun_stop_comp just to be sure
		 * here.
		 */
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->transport_lun_stop_comp);
		wait_for_completion(&cmd->transport_lun_fe_stop_comp);
		spin_lock_irqsave(&cmd->t_state_lock, flags);

		transport_all_task_dev_remove_state(cmd);
		/*
		 * At this point, the frontend who was the originator of this
		 * struct se_cmd, now owns the structure and can be released through
		 * normal means below.
		 */
		pr_debug("wait_for_tasks: Stopped"
			" wait_for_completion(&cmd->transport_lun_fe_"
			"stop_comp); for ITT: 0x%08x\n",
			cmd->se_tfo->get_task_tag(cmd));

		cmd->transport_state &= ~CMD_T_LUN_STOP;
	}

	if (!(cmd->transport_state & CMD_T_ACTIVE)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	cmd->transport_state |= CMD_T_STOP;

	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
		" i_state: %d, t_state: %d, CMD_T_STOP\n",
		cmd, cmd->se_tfo->get_task_tag(cmd),
		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);

	wait_for_completion(&cmd->t_transport_stop_comp);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);

	pr_debug("wait_for_tasks: Stopped wait_for_completion("
		"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
		cmd->se_tfo->get_task_tag(cmd));

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return true;
}
EXPORT_SYMBOL(transport_wait_for_tasks);

static int transport_get_sense_codes(
	struct se_cmd *cmd,
	u8 *asc,
	u8 *ascq)
{
	*asc = cmd->scsi_asc;
	*ascq = cmd->scsi_ascq;

	return 0;
}

static int transport_set_sense_codes(
	struct se_cmd *cmd,
	u8 asc,
	u8 ascq)
{
	cmd->scsi_asc = asc;
	cmd->scsi_ascq = ascq;

	return 0;
}

int transport_send_check_condition_and_sense(
	struct se_cmd *cmd,
	u8 reason,
	int from_transport)
{
	unsigned char *buffer = cmd->sense_buffer;
	unsigned long flags;
	int offset;
	u8 asc = 0, ascq = 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}
	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (!reason && from_transport)
		goto after_reason;

	if (!from_transport)
		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
	/*
	 * Data Segment and SenseLength of the fabric response PDU.
	 *
	 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
	 * from include/scsi/scsi_cmnd.h
	 */
	offset = cmd->se_tfo->set_fabric_sense_len(cmd,
				TRANSPORT_SENSE_BUFFER);
	/*
	 * Actual SENSE DATA, see SPC-3 7.23.2.  SPC_SENSE_KEY_OFFSET uses
	 * SENSE KEY values from include/scsi/scsi.h
	 */
	switch (reason) {
	case TCM_NON_EXISTENT_LUN:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT NOT SUPPORTED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
		break;
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_SECTOR_COUNT_TOO_MANY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID COMMAND OPERATION CODE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
		break;
	case TCM_UNKNOWN_MODE_PAGE:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_CHECK_CONDITION_ABORT_CMD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* BUS DEVICE RESET FUNCTION OCCURRED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
		break;
	case TCM_INCORRECT_AMOUNT_OF_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* NOT ENOUGH UNSOLICITED DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
		break;
	case TCM_INVALID_CDB_FIELD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_INVALID_PARAMETER_LIST:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN PARAMETER LIST */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
		break;
	case TCM_UNEXPECTED_UNSOLICITED_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* UNEXPECTED_UNSOLICITED_DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
		break;
	case TCM_SERVICE_CRC_ERROR:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* PROTOCOL SERVICE CRC ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
		/* N/A */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
		break;
	case TCM_SNACK_REJECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* READ ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
		/* FAILED RETRANSMISSION REQUEST */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
		break;
	case TCM_WRITE_PROTECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* DATA PROTECT */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
		/* WRITE PROTECTED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
		break;
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* UNIT ATTENTION */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_CHECK_CONDITION_NOT_READY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* Not Ready */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
		transport_get_sense_codes(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	default:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT COMMUNICATION FAILURE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
		break;
	}
	/*
	 * This code uses linux/include/scsi/scsi.h SAM status codes!
	 */
	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	/*
	 * Automatically padded, this value is encoded in the fabric's
	 * data_length response PDU containing the SCSI defined sense data.
	 */
	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;

after_reason:
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
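
/*
 * Usage sketch (illustrative only): callers fail a command by picking
 * one of the TCM_* reason codes handled above, e.g. when a fabric's
 * CDB-mapping path hits an opcode the backend does not emulate:
 *
 *	return transport_send_check_condition_and_sense(cmd,
 *			TCM_UNSUPPORTED_SCSI_OPCODE, 0);
 */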

int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
	int ret = 0;

	if (cmd->transport_state & CMD_T_ABORTED) {
		if (!send_status ||
		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
			return 1;

		pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
			" status for CDB: 0x%02x ITT: 0x%08x\n",
			cmd->t_task_cdb[0],
			cmd->se_tfo->get_task_tag(cmd));

		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
		cmd->se_tfo->queue_status(cmd);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL(transport_check_aborted_status);
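
/*
 * Usage sketch (illustrative only): a fabric response path can consult
 * this helper before queueing a normal status, since a command aborted
 * by a TMR already has TASK_ABORTED status queued on its behalf:
 *
 *	if (transport_check_aborted_status(se_cmd, 1) != 0)
 *		return 0;
 */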

void transport_send_task_abort(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * If there are still expected incoming fabric WRITEs, we wait
	 * until they have completed before sending a TASK_ABORTED
	 * response.  This response with TASK_ABORTED status will be
	 * queued back to fabric module by transport_check_aborted_status().
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
			cmd->transport_state |= CMD_T_ABORTED;
			smp_mb__after_atomic_inc();
		}
	}
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;

	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
		" ITT: 0x%08x\n", cmd->t_task_cdb[0],
		cmd->se_tfo->get_task_tag(cmd));

	cmd->se_tfo->queue_status(cmd);
}

static int transport_generic_do_tmr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_tmr_req *tmr = cmd->se_tmr_req;
	int ret;

	switch (tmr->function) {
	case TMR_ABORT_TASK:
		core_tmr_abort_task(dev, tmr, cmd->se_sess);
		break;
	case TMR_ABORT_TASK_SET:
	case TMR_CLEAR_ACA:
	case TMR_CLEAR_TASK_SET:
		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		break;
	case TMR_LUN_RESET:
		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
					 TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_WARM_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_COLD_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	default:
		pr_err("Unknown TMR function: 0x%02x.\n",
				tmr->function);
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	}

	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
	cmd->se_tfo->queue_tm_rsp(cmd);

	transport_cmd_check_stop_to_fabric(cmd);
	return 0;
}

/*	transport_processing_thread():
 *
 *	Per-device kernel thread that dequeues commands from the device
 *	queue object and dispatches them based on t_state.
 */
static int transport_processing_thread(void *param)
{
	int ret;
	struct se_cmd *cmd;
	struct se_device *dev = param;

	while (!kthread_should_stop()) {
		ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
				atomic_read(&dev->dev_queue_obj.queue_cnt) ||
				kthread_should_stop());
		if (ret < 0)
			goto out;

get_cmd:
		cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
		if (!cmd)
			continue;

		switch (cmd->t_state) {
		case TRANSPORT_NEW_CMD:
			BUG();
			break;
		case TRANSPORT_NEW_CMD_MAP:
			if (!cmd->se_tfo->new_cmd_map) {
				pr_err("cmd->se_tfo->new_cmd_map is"
					" NULL for TRANSPORT_NEW_CMD_MAP\n");
				BUG();
			}
			ret = cmd->se_tfo->new_cmd_map(cmd);
			if (ret < 0) {
				transport_generic_request_failure(cmd);
				break;
			}
			ret = transport_generic_new_cmd(cmd);
			if (ret < 0) {
				transport_generic_request_failure(cmd);
				break;
			}
			break;
		case TRANSPORT_PROCESS_WRITE:
			transport_generic_process_write(cmd);
			break;
		case TRANSPORT_PROCESS_TMR:
			transport_generic_do_tmr(cmd);
			break;
		case TRANSPORT_COMPLETE_QF_WP:
			transport_write_pending_qf(cmd);
			break;
		case TRANSPORT_COMPLETE_QF_OK:
			transport_complete_qf(cmd);
			break;
		default:
			pr_err("Unknown t_state: %d for ITT: 0x%08x "
				"i_state: %d on SE LUN: %u\n",
				cmd->t_state,
				cmd->se_tfo->get_task_tag(cmd),
				cmd->se_tfo->get_cmd_state(cmd),
				cmd->se_lun->unpacked_lun);
			BUG();
		}

		goto get_cmd;
	}

out:
	WARN_ON(!list_empty(&dev->state_task_list));
	WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
	dev->process_thread = NULL;
	return 0;
}