/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static int sub_api_initialized;

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;

static int transport_generic_write_pending(struct se_cmd *);
static int transport_processing_thread(void *param);
static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev);
static void transport_free_dev_tasks(struct se_cmd *cmd);
static int transport_generic_get_mem(struct se_cmd *cmd);
static void transport_put_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void transport_generic_request_failure(struct se_cmd *);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session"
				" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration"
				" failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
				" failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
				"cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
			"t10_alua_tg_pt_gp_mem_cache",
			sizeof(struct t10_alua_tg_pt_gp_member),
			__alignof__(struct t10_alua_tg_pt_gp_member),
			0, NULL);
	if (!t10_alua_tg_pt_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"mem_t failed\n");
		goto out_free_tg_pt_gp_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_tg_pt_gp_mem_cache;

	return 0;

out_free_tg_pt_gp_mem_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}
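
/*
 * Example of a caller elsewhere in this file: device setup allocates its
 * per-device index with
 *
 *	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
 */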

static void transport_init_queue_obj(struct se_queue_obj *qobj)
{
	atomic_set(&qobj->queue_cnt, 0);
	INIT_LIST_HEAD(&qobj->qobj_list);
	init_waitqueue_head(&qobj->thread_wq);
	spin_lock_init(&qobj->cmd_queue_lock);
}

void transport_subsystem_check_init(void)
{
	int ret;

	if (sub_api_initialized)
		return;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = request_module("target_core_stgt");
	if (ret != 0)
		pr_err("Unable to load target_core_stgt\n");

	sub_api_initialized = 1;
	return;
}

struct se_session *transport_init_session(void)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from"
				" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	INIT_LIST_HEAD(&se_sess->sess_wait_list);
	spin_lock_init(&se_sess->sess_cmd_lock);

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);

/*
 * Called with spin_lock_bh(&struct se_portal_group->session_lock) held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate an active
	 * struct se_session.
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}
		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	spin_lock_bh(&se_tpg->session_lock);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_bh(&se_tpg->session_lock);
}
EXPORT_SYMBOL(transport_register_session);
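
/*
 * Sketch of typical fabric module usage (the variable names below are
 * hypothetical fabric code, not part of this API): once an I_T nexus is
 * established, a fabric pairs the two calls above,
 *
 *	se_sess = transport_init_session();
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 *	transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess);
 *
 * and calls transport_deregister_session() at nexus shutdown.
 */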

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		list_del(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	struct se_node_acl *se_nacl;
	unsigned long flags;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	/*
	 * Determine if we need to do extra work for this initiator node's
	 * struct se_node_acl if it had been previously dynamically generated.
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
		if (se_nacl->dynamic_node_acl) {
			if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
					se_tpg)) {
				list_del(&se_nacl->acl_list);
				se_tpg->num_node_acls--;
				spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);

				core_tpg_wait_for_nacl_pr_ref(se_nacl);
				core_free_device_list_for_node(se_nacl, se_tpg);
				se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
						se_nacl);
				spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
			}
		}
		spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
	}

	transport_free_session(se_sess);

	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		se_tpg->se_tpg_tfo->get_fabric_name());
}
EXPORT_SYMBOL(transport_deregister_session);

/*
 * Called with cmd->t_state_lock held.
 */
static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_task *task;
	unsigned long flags;

	if (!dev)
		return;

	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		if (task->task_flags & TF_ACTIVE)
			continue;

		spin_lock_irqsave(&dev->execute_task_lock, flags);
		if (task->t_state_active) {
			pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
				cmd->se_tfo->get_task_tag(cmd), dev, task);

			list_del(&task->t_state_list);
			atomic_dec(&cmd->t_task_cdbs_ex_left);
			task->t_state_active = false;
		}
		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
	}

}

/*	transport_cmd_check_stop():
 *
 *	'transport_off = 1' determines if CMD_T_ACTIVE should be cleared.
 *	'transport_off = 2' determines if task_dev_state should be removed.
 *
 *	A non-zero u8 t_state sets cmd->t_state.
 *	Returns 1 when command is stopped, else 0.
 */
static int transport_cmd_check_stop(
	struct se_cmd *cmd,
	int transport_off,
	u8 t_state)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	/*
	 * Determine if IOCTL context caller is requesting the stopping of this
	 * command for LUN shutdown purposes.
	 */
	if (cmd->transport_state & CMD_T_LUN_STOP) {
		pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
			__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));

		cmd->transport_state &= ~CMD_T_ACTIVE;
		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->transport_lun_stop_comp);
		return 1;
	}
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
			__func__, __LINE__,
			cmd->se_tfo->get_task_tag(cmd));

		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);

		/*
		 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
		 * to FE.
		 */
		if (transport_off == 2)
			cmd->se_lun = NULL;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->t_transport_stop_comp);
		return 1;
	}
	if (transport_off) {
		cmd->transport_state &= ~CMD_T_ACTIVE;
		if (transport_off == 2) {
			transport_all_task_dev_remove_state(cmd);
			/*
			 * Clear struct se_cmd->se_lun before the transport_off == 2
			 * handoff to fabric module.
			 */
			cmd->se_lun = NULL;
			/*
			 * Some fabric modules like tcm_loop can release
			 * their internally allocated I/O reference and
			 * struct se_cmd now.
			 *
			 * Fabric modules are expected to return '1' here if the
			 * se_cmd being passed is released at this point,
			 * or zero if not being released.
			 */
			if (cmd->se_tfo->check_stop_free != NULL) {
				spin_unlock_irqrestore(
					&cmd->t_state_lock, flags);

				return cmd->se_tfo->check_stop_free(cmd);
			}
		}
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		return 0;
	} else if (t_state)
		cmd->t_state = t_state;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return 0;
}

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	return transport_cmd_check_stop(cmd, 2, 0);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;
	unsigned long flags;

	if (!lun)
		return;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		transport_all_task_dev_remove_state(cmd);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
	if (!list_empty(&cmd->se_lun_node))
		list_del_init(&cmd->se_lun_node);
	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
}

void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
		transport_lun_remove_cmd(cmd);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
	if (remove) {
		transport_remove_cmd_from_queue(cmd);
		transport_put_cmd(cmd);
	}
}

static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
		bool at_head)
{
	struct se_device *dev = cmd->se_dev;
	struct se_queue_obj *qobj = &dev->dev_queue_obj;
	unsigned long flags;

	if (t_state) {
		spin_lock_irqsave(&cmd->t_state_lock, flags);
		cmd->t_state = t_state;
		cmd->transport_state |= CMD_T_ACTIVE;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	}

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);

	/* If the cmd is already on the list, remove it before we add it */
	if (!list_empty(&cmd->se_queue_node))
		list_del(&cmd->se_queue_node);
	else
		atomic_inc(&qobj->queue_cnt);

	if (at_head)
		list_add(&cmd->se_queue_node, &qobj->qobj_list);
	else
		list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
	cmd->transport_state |= CMD_T_QUEUED;
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	wake_up_interruptible(&qobj->thread_wq);
}

static struct se_cmd *
transport_get_cmd_from_queue(struct se_queue_obj *qobj)
{
	struct se_cmd *cmd;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (list_empty(&qobj->qobj_list)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return NULL;
	}
	cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);

	cmd->transport_state &= ~CMD_T_QUEUED;
	list_del_init(&cmd->se_queue_node);
	atomic_dec(&qobj->queue_cnt);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	return cmd;
}

static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
{
	struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (!(cmd->transport_state & CMD_T_QUEUED)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return;
	}
	cmd->transport_state &= ~CMD_T_QUEUED;
	atomic_dec(&qobj->queue_cnt);
	list_del_init(&cmd->se_queue_node);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
}

/*
 * Completion function used by TCM subsystem plugins (such as FILEIO)
 * for queueing up response from struct se_subsystem_api->do_task()
 */
void transport_complete_sync_cache(struct se_cmd *cmd, int good)
{
	struct se_task *task = list_entry(cmd->t_task_list.next,
				struct se_task, t_list);

	if (good) {
		cmd->scsi_status = SAM_STAT_GOOD;
		task->task_scsi_status = GOOD;
	} else {
		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
		task->task_se_cmd->scsi_sense_reason =
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	}

	transport_complete_task(task, good);
}
EXPORT_SYMBOL(transport_complete_sync_cache);
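
/*
 * Sketch of a backend caller (hypothetical; a FILEIO-style plugin
 * reporting the result of a SYNCHRONIZE_CACHE emulation):
 *
 *	ret = vfs_fsync_range(fd_file, start, end, 1);
 *	transport_complete_sync_cache(cmd, ret == 0);
 */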

static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd);
}

/*	transport_complete_task():
 *
 *	Called from interrupt and non interrupt context depending
 *	on the transport plugin.
 */
void transport_complete_task(struct se_task *task, int success)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	task->task_flags &= ~TF_ACTIVE;

	/*
	 * See if any sense data exists, if so set the TASK_SENSE flag.
	 * Also check for any other post completion work that needs to be
	 * done by the plugins.
	 */
	if (dev && dev->transport->transport_complete) {
		if (dev->transport->transport_complete(task) != 0) {
			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
			task->task_flags |= TF_HAS_SENSE;
			success = 1;
		}
	}

	/*
	 * See if we are waiting for outstanding struct se_task
	 * to complete for an exception condition
	 */
	if (task->task_flags & TF_REQUEST_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&task->task_stop_comp);
		return;
	}

	if (!success)
		cmd->transport_state |= CMD_T_FAILED;

	/*
	 * Decrement the outstanding t_task_cdbs_left count.  The last
	 * struct se_task from struct se_cmd will complete itself into the
	 * device queue depending upon int success.
	 */
	if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}

	if (cmd->transport_state & CMD_T_FAILED) {
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		INIT_WORK(&cmd->work, target_complete_failure_work);
	} else {
		cmd->transport_state |= CMD_T_COMPLETE;
		INIT_WORK(&cmd->work, target_complete_ok_work);
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= CMD_T_ACTIVE;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(transport_complete_task);

/*
 * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
 * struct se_task list is ready to be added to the active execution list
 * of the struct se_device.
 *
 * Called with se_dev_t->execute_task_lock held.
 */
static inline int transport_add_task_check_sam_attr(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	/*
	 * No SAM Task attribute emulation enabled, add to tail of
	 * execution queue
	 */
	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
		list_add_tail(&task->t_execute_list, &dev->execute_task_list);
		return 0;
	}
	/*
	 * HEAD_OF_QUEUE attribute for received CDB, which means
	 * the first task that is associated with a struct se_cmd goes to
	 * head of the struct se_device->execute_task_list, and task_prev
	 * after that for each subsequent task
	 */
	if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
		list_add(&task->t_execute_list,
				(task_prev != NULL) ?
				&task_prev->t_execute_list :
				&dev->execute_task_list);

		pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
				" in execution queue\n",
				task->task_se_cmd->t_task_cdb[0]);
		return 1;
	}
	/*
	 * ORDERED, SIMPLE or UNTAGGED attribute tasks are transitioned from
	 * Dormant -> Active state and added to the end of the
	 * struct se_device->execute_task_list.
	 */
	list_add_tail(&task->t_execute_list, &dev->execute_task_list);
	return 0;
}
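
/*
 * Worked example for the above: with SAM Task Attribute emulation enabled,
 * the first task of a MSG_HEAD_TAG command is inserted at the head of
 * dev->execute_task_list and each subsequent task chains behind task_prev,
 * while SIMPLE/ORDERED/UNTAGGED tasks always append to the tail.
 */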

/*	__transport_add_task_to_execute_queue():
 *
 *	Called with se_dev_t->execute_task_lock held.
 */
static void __transport_add_task_to_execute_queue(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	int head_of_queue;

	head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
	atomic_inc(&dev->execute_tasks);

	if (task->t_state_active)
		return;
	/*
	 * Determine if this task needs to go to HEAD_OF_QUEUE for the
	 * state list as well.  Running with SAM Task Attribute emulation
	 * will always return head_of_queue == 0 here
	 */
	if (head_of_queue)
		list_add(&task->t_state_list, (task_prev) ?
				&task_prev->t_state_list :
				&dev->state_task_list);
	else
		list_add_tail(&task->t_state_list, &dev->state_task_list);

	task->t_state_active = true;

	pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
		task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
		task, dev);
}

static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_task *task;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		spin_lock(&dev->execute_task_lock);
		if (!task->t_state_active) {
			list_add_tail(&task->t_state_list,
				      &dev->state_task_list);
			task->t_state_active = true;

			pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
				task->task_se_cmd->se_tfo->get_task_tag(
				task->task_se_cmd), task, dev);
		}
		spin_unlock(&dev->execute_task_lock);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

static void __transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_task *task, *task_prev = NULL;

	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		if (!list_empty(&task->t_execute_list))
			continue;
		/*
		 * __transport_add_task_to_execute_queue() handles the
		 * SAM Task Attribute emulation if enabled
		 */
		__transport_add_task_to_execute_queue(task, task_prev, dev);
		task_prev = task;
	}
}

static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
	unsigned long flags;
	struct se_device *dev = cmd->se_dev;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	__transport_add_tasks_from_cmd(cmd);
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

void __transport_remove_task_from_execute_queue(struct se_task *task,
		struct se_device *dev)
{
	list_del_init(&task->t_execute_list);
	atomic_dec(&dev->execute_tasks);
}

static void transport_remove_task_from_execute_queue(
	struct se_task *task,
	struct se_device *dev)
{
	unsigned long flags;

	if (WARN_ON(list_empty(&task->t_execute_list)))
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	__transport_remove_task_from_execute_queue(task, dev);
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 */

static void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
					qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec(&dev->dev_qf_count);
		smp_mb__after_atomic_dec();

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			: "UNKNOWN");

		transport_add_cmd_to_queue(cmd, cmd->t_state, true);
	}
}
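
/*
 * Note on the requeue above: commands that previously hit QUEUE_FULL are
 * re-added with at_head == true, so they are retried ahead of newly
 * arriving commands once the fabric has drained.
 */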

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	switch (dev->dev_status) {
	case TRANSPORT_DEVICE_ACTIVATED:
		*bl += sprintf(b + *bl, "ACTIVATED");
		break;
	case TRANSPORT_DEVICE_DEACTIVATED:
		*bl += sprintf(b + *bl, "DEACTIVATED");
		break;
	case TRANSPORT_DEVICE_SHUTDOWN:
		*bl += sprintf(b + *bl, "SHUTDOWN");
		break;
	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
		*bl += sprintf(b + *bl, "OFFLINE");
		break;
	default:
		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
		break;
	}

	*bl += sprintf(b + *bl, "  Execute/Max Queue Depth: %d/%d",
		atomic_read(&dev->execute_tasks), dev->queue_depth);
	*bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
		dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
	*bl += sprintf(b + *bl, "        ");
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access"
				" Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport"
				" Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set..
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	 if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);
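
/*
 * Worked example: for an EVPD 0x83 designation descriptor whose byte 0 is
 * 0x53 (PROTOCOL IDENTIFIER 0x5, CODE SET 0x3) and whose byte 1 has the
 * PIV bit (0x80) set, the masking above stores 0x50, which
 * transport_dump_vpd_proto_id() reports as "Internet SCSI (iSCSI)".
 */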

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
				hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
				hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);
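
/*
 * Worked example (hypothetical page 0x83 payload): for a binary (code set
 * 0x01) NAA descriptor (identifier type 0x03) whose identifier bytes begin
 * 0x60 0x01 0x40, the loop above emits "3600140..." into
 * vpd->device_identifier: one hex digit for the identifier type, then two
 * hex digits per payload byte.
 */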

static void core_setup_task_attr_emulation(struct se_device *dev)
{
	/*
	 * If this device is from Target_Core_Mod/pSCSI, disable the
	 * SAM Task Attribute emulation.
	 *
	 * This is currently not available in upstream Linux/SCSI Target
	 * mode code, and is assumed to be disabled while using TCM/pSCSI.
	 */
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
		return;
	}

	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
	pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
		" device\n", dev->transport->name,
		dev->transport->get_device_rev(dev));
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
	pr_debug("                 ANSI SCSI revision: %02x\n",
				dev->transport->get_device_rev(dev));
}

struct se_device *transport_add_device_to_core_hba(
	struct se_hba *hba,
	struct se_subsystem_api *transport,
	struct se_subsystem_dev *se_dev,
	u32 device_flags,
	void *transport_dev,
	struct se_dev_limits *dev_limits,
	const char *inquiry_prod,
	const char *inquiry_rev)
{
	int force_pt;
	struct se_device  *dev;

	dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
	if (!dev) {
		pr_err("Unable to allocate memory for se_dev_t\n");
		return NULL;
	}

	transport_init_queue_obj(&dev->dev_queue_obj);
	dev->dev_flags		= device_flags;
	dev->dev_status		|= TRANSPORT_DEVICE_DEACTIVATED;
	dev->dev_ptr		= transport_dev;
	dev->se_hba		= hba;
	dev->se_sub_dev		= se_dev;
	dev->transport		= transport;
	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->execute_task_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_task_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->dev_status_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	atomic_set(&dev->dev_ordered_id, 0);

	se_dev_set_default_attribs(dev, dev_limits);

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();
	spin_lock_init(&dev->stats_lock);

	spin_lock(&hba->device_lock);
	list_add_tail(&dev->dev_list, &hba->hba_dev_list);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);
	/*
	 * Setup the SAM Task Attribute emulation for struct se_device
	 */
	core_setup_task_attr_emulation(dev);
	/*
	 * Force PR and ALUA passthrough emulation with internal object use.
	 */
	force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
	/*
	 * Setup the Reservations infrastructure for struct se_device
	 */
	core_setup_reservations(dev, force_pt);
	/*
	 * Setup the Asymmetric Logical Unit Assignment for struct se_device
	 */
	if (core_setup_alua(dev, force_pt) < 0)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->process_thread = kthread_run(transport_processing_thread, dev,
					  "LIO_%s", dev->transport->name);
	if (IS_ERR(dev->process_thread)) {
		pr_err("Unable to create kthread: LIO_%s\n",
			dev->transport->name);
		goto out;
	}
	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 * This is required so that transport_get_inquiry() copies these
	 * originals once back into DEV_T10_WWN(dev) for the virtual device
	 * setup.
	 */
	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (!inquiry_prod || !inquiry_rev) {
			pr_err("All non TCM/pSCSI plugins require"
				" INQUIRY consts\n");
			goto out;
		}

		strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
		strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
	}
	scsi_dump_inquiry(dev);

	return dev;
out:
	kthread_stop(dev->process_thread);

	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	se_release_vpd_for_dev(dev);

	kfree(dev);

	return NULL;
}
EXPORT_SYMBOL(transport_add_device_to_core_hba);

/*	transport_generic_prepare_cdb():
 *
 *	Since the Initiator sees iSCSI devices as LUNs,  the SCSI CDB will
 *	contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
 *	The point of this is since we are mapping iSCSI LUNs to
 *	SCSI Target IDs having a non-zero LUN in the CDB will throw the
 *	devices and HBAs for a loop.
 */
static inline void transport_generic_prepare_cdb(
	unsigned char *cdb)
{
	switch (cdb[0]) {
	case READ_10: /* SBC - RDProtect */
	case READ_12: /* SBC - RDProtect */
	case READ_16: /* SBC - RDProtect */
	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
	case VERIFY: /* SBC - VRProtect */
	case VERIFY_16: /* SBC - VRProtect */
	case WRITE_VERIFY: /* SBC - VRProtect */
	case WRITE_VERIFY_12: /* SBC - VRProtect */
		break;
	default:
		cdb[1] &= 0x1f; /* clear logical unit number */
		break;
	}
}
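
/*
 * Worked example for the masking above: an initiator addressing LUN 1 may
 * send TEST_UNIT_READY with cdb[1] == 0x20 (LUN in bits 7-5 of byte 1);
 * 0x20 & 0x1f == 0x00, so the backend device sees a zeroed LUN field.
 */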

static struct se_task *
transport_generic_get_task(struct se_cmd *cmd,
		enum dma_data_direction data_direction)
{
	struct se_task *task;
	struct se_device *dev = cmd->se_dev;

	task = dev->transport->alloc_task(cmd->t_task_cdb);
	if (!task) {
		pr_err("Unable to allocate struct se_task\n");
		return NULL;
	}

	INIT_LIST_HEAD(&task->t_list);
	INIT_LIST_HEAD(&task->t_execute_list);
	INIT_LIST_HEAD(&task->t_state_list);
	init_completion(&task->task_stop_comp);
	task->task_se_cmd = cmd;
	task->task_data_direction = data_direction;

	return task;
}

static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);

/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
	struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer)
{
	INIT_LIST_HEAD(&cmd->se_lun_node);
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_queue_node);
	INIT_LIST_HEAD(&cmd->se_cmd_list);
	INIT_LIST_HEAD(&cmd->t_task_list);
	init_completion(&cmd->transport_lun_fe_stop_comp);
	init_completion(&cmd->transport_lun_stop_comp);
	init_completion(&cmd->t_transport_stop_comp);
	init_completion(&cmd->cmd_wait_comp);
	spin_lock_init(&cmd->t_state_lock);
	cmd->transport_state = CMD_T_DEV_ACTIVE;

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;
}
EXPORT_SYMBOL(transport_init_se_cmd);

static int transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 0;

	if (cmd->sam_task_attr == MSG_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA"
			" emulation is not supported\n");
		return -EINVAL;
	}
	/*
	 * Used to determine when ORDERED commands should go from
	 * Dormant to Active status.
	 */
	cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
	smp_mb__after_atomic_inc();
	pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
			cmd->se_ordered_id, cmd->sam_task_attr,
			cmd->se_dev->transport->name);
	return 0;
}

/*	transport_generic_allocate_tasks():
 *
 *	Called from fabric RX Thread.
 */
int transport_generic_allocate_tasks(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	int ret;

	transport_generic_prepare_cdb(cdb);
	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now..  Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
						GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb"
				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(cmd->__t_task_cdb));
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason =
					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
			return -ENOMEM;
		}
	} else
		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Copy the original CDB into cmd->t_task_cdb.
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
	/*
	 * Setup the received CDB based on SCSI defined opcodes and
	 * perform unit attention, persistent reservations and ALUA
	 * checks for virtual device backends.  The cmd->t_task_cdb
	 * pointer is expected to be setup before we reach this point.
	 */
	ret = transport_generic_cmd_sequencer(cmd, cdb);
	if (ret < 0)
		return ret;
	/*
	 * Check for SAM Task Attribute Emulation
	 */
	if (transport_check_alloc_task_attr(cmd) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}
	spin_lock(&cmd->se_lun->lun_sep_lock);
	if (cmd->se_lun->lun_sep)
		cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
	spin_unlock(&cmd->se_lun->lun_sep_lock);
	return 0;
}
EXPORT_SYMBOL(transport_generic_allocate_tasks);

/*
 * Used by fabric module frontends to queue tasks directly.
 * May only be called from process context.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	int ret;

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}
	if (in_interrupt()) {
		dump_stack();
		pr_err("transport_generic_handle_cdb cannot be called"
				" from interrupt context\n");
		return -EINVAL;
	}
	/*
	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE following
	 * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
	 * in existing usage to ensure that outstanding descriptors are handled
	 * correctly during shutdown via transport_wait_for_tasks()
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	cmd->transport_state |= CMD_T_ACTIVE;

	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary..
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret < 0)
		transport_generic_request_failure(cmd);

	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);

/**
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_table
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 **/
void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{
	struct se_portal_group *se_tpg;
	int rc;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
	BUG_ON(in_interrupt());
	/*
	 * Initialize se_cmd for target operation.  From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
				data_length, data_dir, task_attr, sense);
	/*
	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
	 * se_sess->sess_cmd_list.  A second kref_get here is necessary
	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
	 */
	target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
	/*
	 * Signal bidirectional data payloads to target-core
	 */
	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;
	/*
	 * Locate se_lun pointer and attach it to struct se_cmd
	 */
	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
		transport_send_check_condition_and_sense(se_cmd,
				se_cmd->scsi_sense_reason, 0);
		target_put_sess_cmd(se_sess, se_cmd);
		return;
	}
	/*
	 * Sanitize CDBs via transport_generic_cmd_sequencer() and
	 * allocate the necessary tasks to complete the received CDB+data
	 */
	rc = transport_generic_allocate_tasks(se_cmd, cdb);
	if (rc != 0) {
		transport_generic_request_failure(se_cmd);
		return;
	}
	/*
	 * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
	 * for immediate execution of READs, otherwise wait for
	 * transport_generic_handle_data() to be called for WRITEs
	 * when fabric has filled the incoming buffer.
	 */
	transport_handle_cdb_direct(se_cmd);
	return;
}
EXPORT_SYMBOL(target_submit_cmd);
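
/*
 * Sketch of a fabric caller (the ioreq structure and its fields below are
 * hypothetical): after decoding a SCSI request from the wire, a fabric
 * driver hands it to target-core in process context with
 *
 *	target_submit_cmd(&ioreq->se_cmd, se_sess, ioreq->cdb,
 *			  ioreq->sense_buf, ioreq->unpacked_lun,
 *			  ioreq->data_length, MSG_SIMPLE_TAG,
 *			  ioreq->data_dir, 0);
 */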

/*
 * Used by fabric module frontends defining a TFO->new_cmd_map() caller
 * to  queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
 * complete setup in TCM process context w/ TFO->new_cmd_map().
 */
int transport_generic_handle_cdb_map(
	struct se_cmd *cmd)
{
	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}

	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_cdb_map);

/*	transport_generic_handle_data():
 *
 *
 */
int transport_generic_handle_data(
	struct se_cmd *cmd)
{
	/*
	 * For the software fabric case, we assume the nexus is being
	 * failed/shutdown when signals are pending from the kthread context
	 * caller, so we return a failure.  For the HW target mode case running
	 * in interrupt code, the signal_pending() check is skipped.
	 */
	if (!in_interrupt() && signal_pending(current))
		return -EPERM;
	/*
	 * If the received CDB has already been ABORTED by the generic
	 * target engine, we now call transport_check_aborted_status()
	 * to queue any delayed TASK_ABORTED status for the received CDB to the
	 * fabric module as we are expecting no further incoming DATA OUT
	 * sequences at this point.
	 */
	if (transport_check_aborted_status(cmd, 1) != 0)
		return 0;

	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_data);

/*	transport_generic_handle_tmr():
 *
 *
 */
int transport_generic_handle_tmr(
	struct se_cmd *cmd)
{
	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);

/*
 * If the task is active, request it to be stopped and sleep until it
 * has completed.
 */
bool target_stop_task(struct se_task *task, unsigned long *flags)
{
	struct se_cmd *cmd = task->task_se_cmd;
	bool was_active = false;

	if (task->task_flags & TF_ACTIVE) {
		task->task_flags |= TF_REQUEST_STOP;
		spin_unlock_irqrestore(&cmd->t_state_lock, *flags);

		pr_debug("Task %p waiting to complete\n", task);
		wait_for_completion(&task->task_stop_comp);
		pr_debug("Task %p stopped successfully\n", task);

		spin_lock_irqsave(&cmd->t_state_lock, *flags);
		atomic_dec(&cmd->t_task_cdbs_left);
		task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP);
		was_active = true;
	}

	return was_active;
}

static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
{
	struct se_task *task, *task_tmp;
	unsigned long flags;
	int ret = 0;

	pr_debug("ITT[0x%08x] - Stopping tasks\n",
		cmd->se_tfo->get_task_tag(cmd));

	/*
	 * No tasks remain in the execution queue
	 */
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_for_each_entry_safe(task, task_tmp,
				&cmd->t_task_list, t_list) {
		pr_debug("Processing task %p\n", task);
		/*
		 * If the struct se_task has not been sent and is not active,
		 * remove the struct se_task from the execution queue.
		 */
		if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) {
			spin_unlock_irqrestore(&cmd->t_state_lock,
					flags);
			transport_remove_task_from_execute_queue(task,
					cmd->se_dev);

			pr_debug("Task %p removed from execute queue\n", task);
			spin_lock_irqsave(&cmd->t_state_lock, flags);
			continue;
		}

		if (!target_stop_task(task, &flags)) {
			pr_debug("Task %p - did nothing\n", task);
			ret++;
		}
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return ret;
}

/*
 * Handle SAM-esque emulation for generic transport request failures.
 */
static void transport_generic_request_failure(struct se_cmd *cmd)
{
	int ret = 0;

	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
		cmd->t_task_cdb[0]);
	pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
		cmd->se_tfo->get_cmd_state(cmd),
		cmd->t_state, cmd->scsi_sense_reason);
	pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
		" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
		" CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
		cmd->t_task_list_num,
		atomic_read(&cmd->t_task_cdbs_left),
		atomic_read(&cmd->t_task_cdbs_sent),
		atomic_read(&cmd->t_task_cdbs_ex_left),
		(cmd->transport_state & CMD_T_ACTIVE) != 0,
		(cmd->transport_state & CMD_T_STOP) != 0,
		(cmd->transport_state & CMD_T_SENT) != 0);

	/*
	 * Handle SAM Task Attribute emulation for the failed struct se_cmd
	 */
	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);

	switch (cmd->scsi_sense_reason) {
	case TCM_NON_EXISTENT_LUN:
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_INVALID_CDB_FIELD:
	case TCM_INVALID_PARAMETER_LIST:
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	case TCM_UNKNOWN_MODE_PAGE:
	case TCM_WRITE_PROTECTED:
	case TCM_CHECK_CONDITION_ABORT_CMD:
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
	case TCM_CHECK_CONDITION_NOT_READY:
		break;
	case TCM_RESERVATION_CONFLICT:
		/*
		 * No SENSE Data payload for this case, set SCSI Status
		 * and queue the response to $FABRIC_MOD.
		 *
		 * Uses linux/include/scsi/scsi.h SAM status codes defs
		 */
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
		/*
		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
		 * CONFLICT STATUS.
		 *
		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
		 */
		if (cmd->se_sess &&
		    cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
			core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
				cmd->orig_fe_lun, 0x2C,
				ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);

		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		goto check_stop;
	default:
		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
			cmd->t_task_cdb[0], cmd->scsi_sense_reason);
		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	}
	/*
	 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
	 * make the call to transport_send_check_condition_and_sense()
	 * directly.  Otherwise expect the fabric to make the call to
	 * transport_send_check_condition_and_sense() after handling
	 * possible unsolicited write data payloads.
	 */
	ret = transport_send_check_condition_and_sense(cmd,
			cmd->scsi_sense_reason, 0);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;

check_stop:
	transport_lun_remove_cmd(cmd);
	if (!transport_cmd_check_stop_to_fabric(cmd))
		;
	return;

queue_full:
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}
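
/*
 * Worked example: a READ_10 CDB with bytes 2..5 of 0x00 0x12 0x34 0x56
 * yields transport_lba_32() == 0x00123456, while transport_lba_21()
 * masks cdb[1] down to its low 5 bits before shifting in cdb[2..3].
 */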

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
	se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
}

/*
 * Called from Fabric Module context from transport_execute_tasks()
 *
 * The return of this function determines if the tasks from struct se_cmd
 * get added to the execution queue in transport_execute_tasks(),
 * or are added to the delayed or ordered lists here.
 */
static inline int transport_execute_task_attr(struct se_cmd *cmd)
{
	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 1;
	/*
	 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
	 * to add the tasks of the passed struct se_cmd to the front of the list.
	 */
	if (cmd->sam_task_attr == MSG_HEAD_TAG) {
		pr_debug("Added HEAD_OF_QUEUE for CDB:"
			" 0x%02x, se_ordered_id: %u\n",
			cmd->t_task_cdb[0],
			cmd->se_ordered_id);
		return 1;
	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
		atomic_inc(&cmd->se_dev->dev_ordered_sync);
		smp_mb__after_atomic_inc();

		pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
				" list, se_ordered_id: %u\n",
				cmd->t_task_cdb[0],
				cmd->se_ordered_id);
		/*
		 * Add ORDERED command to tail of execution queue if
		 * no other older commands exist that need to be
		 * completed first.
		 */
		if (!atomic_read(&cmd->se_dev->simple_cmds))
			return 1;
	} else {
		/*
		 * For SIMPLE and UNTAGGED Task Attribute commands
		 */
		atomic_inc(&cmd->se_dev->simple_cmds);
		smp_mb__after_atomic_inc();
	}
	/*
	 * Otherwise if one or more outstanding ORDERED task attribute exist,
	 * add the dormant task(s) built for the passed struct se_cmd to the
	 * execution queue and become in Active state for this struct se_device.
	 */
	if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
		/*
		 * Otherwise, add cmd w/ tasks to delayed cmd queue that
		 * will be drained upon completion of HEAD_OF_QUEUE task.
		 */
		spin_lock(&cmd->se_dev->delayed_cmd_lock);
		cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
		list_add_tail(&cmd->se_delayed_node,
				&cmd->se_dev->delayed_cmd_list);
		spin_unlock(&cmd->se_dev->delayed_cmd_lock);

		pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
			" delayed CMD list, se_ordered_id: %u\n",
			cmd->t_task_cdb[0], cmd->sam_task_attr,
			cmd->se_ordered_id);
		/*
		 * Return zero to let transport_execute_tasks() know
		 * not to add the delayed tasks to the execution list.
		 */
		return 0;
	}
	/*
	 * Otherwise, no ORDERED task attributes exist.
	 */
	return 1;
}
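
/*
 * Worked example of the rules above: for the sequence SIMPLE, SIMPLE,
 * ORDERED, SIMPLE, the ORDERED command is parked on delayed_cmd_list
 * while the first two SIMPLE commands are outstanding, and the trailing
 * SIMPLE is likewise delayed until transport_complete_task_attr()
 * drains the list once the ORDERED command completes.
 */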

/*
 * Called from fabric module context in transport_generic_new_cmd() and
 * transport_generic_process_write()
 */
static int transport_execute_tasks(struct se_cmd *cmd)
{
	int add_tasks;
	struct se_device *se_dev = cmd->se_dev;
	/*
	 * Call transport_cmd_check_stop() to see if a fabric exception
	 * has occurred that prevents execution.
	 */
	if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
		/*
		 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
		 * attribute for the tasks of the received struct se_cmd CDB
		 */
		add_tasks = transport_execute_task_attr(cmd);
		if (!add_tasks)
			goto execute_tasks;
		/*
		 * __transport_execute_tasks() -> __transport_add_tasks_from_cmd()
		 * adds associated se_tasks while holding dev->execute_task_lock
		 * before I/O dispatch to avoid a double spinlock access.
		 */
		__transport_execute_tasks(se_dev, cmd);
		return 0;
	}

execute_tasks:
	__transport_execute_tasks(se_dev, NULL);
	return 0;
}

/*
 * Called to check the struct se_device tcq depth window, and once open pull
 * struct se_task from struct se_device->execute_task_list for dispatch.
 *
 * Called from transport_processing_thread()
 */
static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
{
	int error;
	struct se_cmd *cmd = NULL;
	struct se_task *task = NULL;
	unsigned long flags;

check_depth:
	spin_lock_irq(&dev->execute_task_lock);
	if (new_cmd != NULL)
		__transport_add_tasks_from_cmd(new_cmd);

	if (list_empty(&dev->execute_task_list)) {
		spin_unlock_irq(&dev->execute_task_lock);
		return 0;
	}
	task = list_first_entry(&dev->execute_task_list,
				struct se_task, t_execute_list);
	__transport_remove_task_from_execute_queue(task, dev);
	spin_unlock_irq(&dev->execute_task_lock);

	cmd = task->task_se_cmd;
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	task->task_flags |= (TF_ACTIVE | TF_SENT);
	atomic_inc(&cmd->t_task_cdbs_sent);

	if (atomic_read(&cmd->t_task_cdbs_sent) ==
	    cmd->t_task_list_num)
		cmd->transport_state |= CMD_T_SENT;

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (cmd->execute_task)
		error = cmd->execute_task(task);
	else
		error = dev->transport->do_task(task);
	if (error != 0) {
		spin_lock_irqsave(&cmd->t_state_lock, flags);
		task->task_flags &= ~TF_ACTIVE;
		cmd->transport_state &= ~CMD_T_SENT;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		transport_stop_tasks_for_cmd(cmd);
		transport_generic_request_failure(cmd);
	}

	new_cmd = NULL;
	goto check_depth;

}

static inline u32 transport_get_sectors_6(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 8-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * Use 24-bit allocation length for TYPE_TAPE.
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];

	/*
	 * Everything else assume TYPE_DISK Sector CDB location.
	 * Use 8-bit sector value.  SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
type_disk:
	return cdb[4] ? : 256;
}

static inline u32 transport_get_sectors_10(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 16-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * XXX_10 is not defined in SSC, throw an exception
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
		*ret = -EINVAL;
		return 0;
	}

	/*
	 * Everything else assume TYPE_DISK Sector CDB location.
	 * Use 16-bit sector value.
	 */
type_disk:
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * XXX_12 is not defined in SSC, throw an exception
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
		*ret = -EINVAL;
		return 0;
	}

	/*
	 * Everything else assume TYPE_DISK Sector CDB location.
	 * Use 32-bit sector value.
	 */
type_disk:
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * Use 24-bit allocation length for TYPE_TAPE.
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];

type_disk:
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
		    (cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
		    (cdb[30] << 8) + cdb[31];

}

static inline u32 transport_get_size(
	u32 sectors,
	unsigned char *cdb,
	struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
		if (cdb[1] & 1) { /* sectors */
			return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
		} else /* bytes */
			return sectors;
	}
#if 0
	pr_debug("Returning block_size: %u, sectors: %u == %u for"
			" %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors,
			dev->se_sub_dev->se_dev_attrib.block_size * sectors,
			dev->transport->name);
#endif
	return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
}
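
/*
 * Example: for a TYPE_DISK backend with a 512 byte block_size, eight
 * sectors map to transport_get_size() == 4096 bytes; for TYPE_TAPE the
 * FIXED bit (cdb[1] & 1) selects between block and byte counts.
 */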

static void transport_xor_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	int i;
	int count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_mem_bidi_list
	 */

	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg), KM_USER0);
		if (!addr)
			goto out;

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr, KM_USER0);
	}

out:
	kfree(buf);
}

/*
 * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
 */
static int transport_get_sense_data(struct se_cmd *cmd)
{
	unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
	struct se_device *dev = cmd->se_dev;
	struct se_task *task = NULL, *task_tmp;
	unsigned long flags;
	u32 offset = 0;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}

	list_for_each_entry_safe(task, task_tmp,
				&cmd->t_task_list, t_list) {
		if (!(task->task_flags & TF_HAS_SENSE))
			continue;

		if (!dev->transport->get_sense_buffer) {
			pr_err("dev->transport->get_sense_buffer"
					" is NULL\n");
			continue;
		}

		sense_buffer = dev->transport->get_sense_buffer(task);
		if (!sense_buffer) {
			pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate"
				" sense buffer for task with sense\n",
				cmd->se_tfo->get_task_tag(cmd), task);
			continue;
		}
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		offset = cmd->se_tfo->set_fabric_sense_len(cmd,
				TRANSPORT_SENSE_BUFFER);

		memcpy(&buffer[offset], sense_buffer,
				TRANSPORT_SENSE_BUFFER);
		cmd->scsi_status = task->task_scsi_status;
		/* Automatically padded */
		cmd->scsi_sense_length =
				(TRANSPORT_SENSE_BUFFER + offset);

		pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
				" and sense\n",
			dev->se_hba->hba_id, dev->transport->name,
				cmd->scsi_status);
		return 0;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return -1;
}

static inline long long transport_dev_end_lba(struct se_device *dev)
{
	return dev->transport->get_blocks(dev) + 1;
}

static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	u32 sectors;

	if (dev->transport->get_device_type(dev) != TYPE_DISK)
		return 0;

	sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);

	if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
		pr_err("LBA: %llu Sectors: %u exceeds"
			" transport_dev_end_lba(): %llu\n",
			cmd->t_task_lba, sectors,
			transport_dev_end_lba(dev));
		return -EINVAL;
	}

	return 0;
}
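
/*
 * Example of the range check above, assuming ->get_blocks() returns the
 * last addressable LBA: a backend reporting get_blocks() == 999 gives
 * transport_dev_end_lba() == 1000, so LBA 996 + 8 sectors (1004) is
 * rejected while LBA 992 + 8 sectors (1000) still passes.
 */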

static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
{
	/*
	 * Determine if the received WRITE_SAME is used for direct
	 * passthrough into Linux/SCSI with struct request via TCM/pSCSI
	 * or we are signaling the use of internal WRITE_SAME + UNMAP=1
	 * emulation for -> Linux/BLOCK discard with TCM/IBLOCK code.
	 */
	int passthrough = (dev->transport->transport_type ==
				TRANSPORT_PLUGIN_PHBA_PDEV);

	if (!passthrough) {
		if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
			pr_err("WRITE_SAME PBDATA and LBDATA"
				" bits not supported for Block Discard"
				" Emulation\n");
			return -ENOSYS;
		}
		/*
		 * Currently for the emulated case we only accept
		 * tpws with the UNMAP=1 bit set.
		 */
		if (!(flags[0] & 0x08)) {
			pr_err("WRITE_SAME w/o UNMAP bit not"
				" supported for Block Discard Emulation\n");
			return -ENOSYS;
		}
	}

	return 0;
}
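
/*
 * Example of the flag decode above: for emulated backends a WRITE_SAME
 * flags byte of 0x08 (UNMAP=1, PBDATA=0, LBDATA=0) is accepted, while
 * 0x00, or any value with 0x02/0x04 set, fails with -ENOSYS.
 */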

/*	transport_generic_cmd_sequencer():
 *
 *	Generic Command Sequencer that should work for most DAS transport
 *	drivers.
 *
 *	Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
 *	RX Thread.
 *
 *	FIXME: Need to support other SCSI OPCODEs here as well.
 */
static int transport_generic_cmd_sequencer(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	struct se_device *dev = cmd->se_dev;
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	int ret = 0, sector_ret = 0, passthrough;
	u32 sectors = 0, size = 0, pr_reg_type = 0;
	u16 service_action;
	u8 alua_ascq = 0;
	/*
	 * Check for an existing UNIT ATTENTION condition
	 */
	if (core_scsi3_ua_check(cmd, cdb) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
		return -EINVAL;
	}
	/*
	 * Check status of Asymmetric Logical Unit Assignment port
	 */
	ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
	if (ret != 0) {
		/*
		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
		 * The ALUA additional sense code qualifier (ASCQ) is determined
		 * by the ALUA primary or secondary access state.
		 */
		if (ret > 0) {
#if 0
			pr_debug("[%s]: ALUA TG Port not available,"
				" SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
				cmd->se_tfo->get_fabric_name(), alua_ascq);
#endif
			transport_set_sense_codes(cmd, 0x04, alua_ascq);
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
			return -EINVAL;
		}
		goto out_invalid_cdb_field;
	}
	/*
	 * Check status for SPC-3 Persistent Reservations
	 */
	if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
		if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
					cmd, cdb, pr_reg_type) != 0) {
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
			cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
			return -EBUSY;
		}
		/*
		 * This means the CDB is allowed for the SCSI Initiator port
		 * when said port is *NOT* holding the legacy SPC-2 or
		 * SPC-3 Persistent Reservation.
		 */
	}

	/*
	 * If we operate in passthrough mode we skip most CDB emulation and
	 * instead hand the commands down to the physical SCSI device.
	 */
	passthrough =
		(dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_64(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_10:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_64(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case XDWRITEREAD_10:
		if ((cmd->data_direction != DMA_TO_DEVICE) ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			goto out_invalid_cdb_field;
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;

		/*
		 * Do not allow BIDI commands for passthrough mode.
		 */
		if (passthrough)
			goto out_unsupported_cdb;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->transport_complete_callback = &transport_xor_callback;
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		break;
	case VARIABLE_LENGTH_CMD:
		service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
			if (sector_ret)
				goto out_unsupported_cdb;
			size = transport_get_size(sectors, cdb, cmd);
			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;

			/*
			 * Do not allow BIDI commands for passthrough mode.
			 */
			if (passthrough)
				goto out_unsupported_cdb;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->transport_complete_callback = &transport_xor_callback;
			if (cdb[1] & 0x8)
				cmd->se_cmd_flags |= SCF_FUA;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
			if (sector_ret)
				goto out_unsupported_cdb;

			if (sectors)
				size = transport_get_size(1, cdb, cmd);
			else {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				goto out_invalid_cdb_field;
			}

			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
			cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;

			if (target_check_write_same_discard(&cdb[10], dev) < 0)
				goto out_unsupported_cdb;
			if (!passthrough)
				cmd->execute_task = target_emulate_write_same;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
				" 0x%04x not supported\n", service_action);
			goto out_unsupported_cdb;
		}
		break;
	case MAINTENANCE_IN:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/* MAINTENANCE_IN from SCC-2 */
			/*
			 * Check for emulated MI_REPORT_TARGET_PGS.
			 */
			if (cdb[1] == MI_REPORT_TARGET_PGS &&
			    su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
				cmd->execute_task =
					target_emulate_report_target_port_groups;
			}
			size = (cdb[6] << 24) | (cdb[7] << 16) |
			       (cdb[8] << 8) | cdb[9];
		} else {
			/* GPCMD_SEND_KEY from multi media commands */
			size = (cdb[8] << 8) + cdb[9];
		}
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MODE_SELECT:
		size = cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MODE_SELECT_10:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MODE_SENSE:
		size = cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_modesense;
		break;
	case MODE_SENSE_10:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_modesense;
		break;
	case GPCMD_READ_BUFFER_CAPACITY:
	case GPCMD_SEND_OPC:
	case LOG_SELECT:
	case LOG_SENSE:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case READ_BLOCK_LIMITS:
		size = READ_BLOCK_LEN;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case GPCMD_GET_CONFIGURATION:
	case GPCMD_READ_FORMAT_CAPACITIES:
	case GPCMD_READ_DISC_INFO:
	case GPCMD_READ_TRACK_RZONE_INFO:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case PERSISTENT_RESERVE_IN:
		if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
			cmd->execute_task = target_scsi3_emulate_pr_in;
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case PERSISTENT_RESERVE_OUT:
		if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
			cmd->execute_task = target_scsi3_emulate_pr_out;
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case GPCMD_MECHANISM_STATUS:
	case GPCMD_READ_DVD_STRUCTURE:
		size = (cdb[8] << 8) + cdb[9];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case READ_POSITION:
		size = READ_POSITION_LEN;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MAINTENANCE_OUT:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/* MAINTENANCE_OUT from SCC-2
			 *
			 * Check for emulated MO_SET_TARGET_PGS.
			 */
			if (cdb[1] == MO_SET_TARGET_PGS &&
			    su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
				cmd->execute_task =
					target_emulate_set_target_port_groups;
			}

			size = (cdb[6] << 24) | (cdb[7] << 16) |
			       (cdb[8] << 8) | cdb[9];
		} else {
			/* GPCMD_REPORT_KEY from multi media commands */
			size = (cdb[8] << 8) + cdb[9];
		}
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case INQUIRY:
		size = (cdb[3] << 8) + cdb[4];
		/*
		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
		 * See spc4r17 section 5.3
		 */
		if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
			cmd->sam_task_attr = MSG_HEAD_TAG;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_inquiry;
		break;
	case READ_BUFFER:
		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_readcapacity;
		break;
	case READ_MEDIA_SERIAL_NUMBER:
	case SECURITY_PROTOCOL_IN:
	case SECURITY_PROTOCOL_OUT:
		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case SERVICE_ACTION_IN:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			if (!passthrough)
				cmd->execute_task =
					target_emulate_readcapacity_16;
			break;
		default:
			if (passthrough)
				break;

			pr_err("Unsupported SA: 0x%02x\n",
				cmd->t_task_cdb[1] & 0x1f);
			goto out_unsupported_cdb;
		}
		/*FALLTHROUGH*/
	case ACCESS_CONTROL_IN:
	case ACCESS_CONTROL_OUT:
	case EXTENDED_COPY:
	case READ_ATTRIBUTE:
	case RECEIVE_COPY_RESULTS:
	case WRITE_ATTRIBUTE:
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case RECEIVE_DIAGNOSTIC:
	case SEND_DIAGNOSTIC:
		size = (cdb[3] << 8) | cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
#if 0
	case GPCMD_READ_CD:
		sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		size = (2336 * sectors);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
#endif
	case READ_TOC:
		size = cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case REQUEST_SENSE:
		size = cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_request_sense;
		break;
	case READ_ELEMENT_STATUS:
		size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case WRITE_BUFFER:
		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case RESERVE:
	case RESERVE_10:
		/*
		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
		 */
		if (cdb[0] == RESERVE_10)
			size = (cdb[7] << 8) | cdb[8];
		else
			size = cmd->data_length;

		/*
		 * Setup the legacy emulated handler for SPC-2 and
		 * >= SPC-3 compatible reservation handling (CRH=1)
		 * Otherwise, we assume the underlying SCSI logic
		 * is running in SPC_PASSTHROUGH, and wants reservations
		 * emulation disabled.
		 */
		if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
			cmd->execute_task = target_scsi2_reservation_reserve;
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case RELEASE:
	case RELEASE_10:
		/*
		 * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
		 */
		if (cdb[0] == RELEASE_10)
			size = (cdb[7] << 8) | cdb[8];
		else
			size = cmd->data_length;

		if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
			cmd->execute_task = target_scsi2_reservation_release;
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		/*
		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
		 */
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
			cmd->t_task_lba = transport_lba_64(cdb);
		}
		if (sector_ret)
			goto out_unsupported_cdb;

		size = transport_get_size(sectors, cdb, cmd);
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;

		if (passthrough)
			break;

		/*
		 * Check to ensure that LBA + Range does not exceed past end of
		 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
		 */
		if ((cmd->t_task_lba != 0) || (sectors != 0)) {
			if (transport_cmd_get_valid_sectors(cmd) < 0)
				goto out_invalid_cdb_field;
		}
		cmd->execute_task = target_emulate_synchronize_cache;
		break;
	case UNMAP:
		size = get_unaligned_be16(&cdb[7]);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;

		if (sectors)
			size = transport_get_size(1, cdb, cmd);
		else {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			goto out_invalid_cdb_field;
		}

		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;

		if (target_check_write_same_discard(&cdb[1], dev) < 0)
			goto out_unsupported_cdb;
		if (!passthrough)
			cmd->execute_task = target_emulate_write_same;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;

		if (sectors)
			size = transport_get_size(1, cdb, cmd);
		else {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			goto out_invalid_cdb_field;
		}

		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of original reserved field
		 */
		if (target_check_write_same_discard(&cdb[1], dev) < 0)
			goto out_unsupported_cdb;
		if (!passthrough)
			cmd->execute_task = target_emulate_write_same;
		break;
	case ALLOW_MEDIUM_REMOVAL:
	case ERASE:
	case REZERO_UNIT:
	case SEEK_10:
	case SPACE:
	case START_STOP:
	case TEST_UNIT_READY:
	case VERIFY:
	case WRITE_FILEMARKS:
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_noop;
		break;
	case GPCMD_CLOSE_TRACK:
	case INITIALIZE_ELEMENT_STATUS:
	case GPCMD_LOAD_UNLOAD:
	case GPCMD_SET_SPEED:
	case MOVE_MEDIUM:
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case REPORT_LUNS:
		cmd->execute_task = target_report_luns;
		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		/*
		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
		 * See spc4r17 section 5.3
		 */
		if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
			cmd->sam_task_attr = MSG_HEAD_TAG;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	default:
		pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
			" 0x%02x, sending CHECK_CONDITION.\n",
			cmd->se_tfo->get_fabric_name(), cdb[0]);
		goto out_unsupported_cdb;
	}

	if (size != cmd->data_length) {
		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
				cmd->data_length, size, cdb[0]);

		cmd->cmd_spdtl = size;

		if (cmd->data_direction == DMA_TO_DEVICE) {
			pr_err("Rejecting underflow/overflow"
					" WRITE data\n");
			goto out_invalid_cdb_field;
		}
		/*
		 * Reject READ_* or WRITE_* with overflow/underflow for
		 * type SCF_SCSI_DATA_SG_IO_CDB.
		 */
		if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512))  {
			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
				" CDB on non 512-byte sector setup subsystem"
				" plugin: %s\n", dev->transport->name);
			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
			goto out_invalid_cdb_field;
		}

		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
		}
		cmd->data_length = size;
	}

	/* reject any command that we don't have a handler for */
	if (!(passthrough || cmd->execute_task ||
	     (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
		goto out_unsupported_cdb;

	transport_set_supported_SAM_opcode(cmd);
	return ret;

out_unsupported_cdb:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
	return -EINVAL;
out_invalid_cdb_field:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
	return -EINVAL;
}

/*
 * Called from I/O completion to determine which dormant/delayed
 * and ordered cmds need to have their tasks added to the execution queue.
 */
static void transport_complete_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_cmd *cmd_p, *cmd_tmp;
	int new_active_tasks = 0;

	if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
		atomic_dec(&dev->simple_cmds);
		smp_mb__after_atomic_dec();
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
			cmd->se_ordered_id);
	} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for"
			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
			cmd->se_ordered_id);
	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
		atomic_dec(&dev->dev_ordered_sync);
		smp_mb__after_atomic_dec();

		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
			" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
	}
	/*
	 * Process all commands up to the last received
	 * ORDERED task attribute which requires another blocking
	 * boundary
	 */
	spin_lock(&dev->delayed_cmd_lock);
	list_for_each_entry_safe(cmd_p, cmd_tmp,
			&dev->delayed_cmd_list, se_delayed_node) {

		list_del(&cmd_p->se_delayed_node);
		spin_unlock(&dev->delayed_cmd_lock);

		pr_debug("Calling add_tasks() for"
			" cmd_p: 0x%02x Task Attr: 0x%02x"
			" Dormant -> Active, se_ordered_id: %u\n",
			cmd_p->t_task_cdb[0],
			cmd_p->sam_task_attr, cmd_p->se_ordered_id);

		transport_add_tasks_from_cmd(cmd_p);
		new_active_tasks++;

		spin_lock(&dev->delayed_cmd_lock);
		if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
			break;
	}
	spin_unlock(&dev->delayed_cmd_lock);
	/*
	 * If new tasks have become active, wake up the transport thread
	 * to do the processing of the Active tasks.
	 */
	if (new_active_tasks != 0)
		wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
}

static void transport_complete_qf(struct se_cmd *cmd)
{
	int ret = 0;

	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);

	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret)
			goto out;
	}

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		ret = cmd->se_tfo->queue_data_in(cmd);
		break;
	case DMA_TO_DEVICE:
		if (cmd->t_bidi_data_sg) {
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret < 0)
				break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		ret = cmd->se_tfo->queue_status(cmd);
		break;
	default:
		break;
	}

out:
	if (ret < 0) {
		transport_handle_queue_full(cmd, cmd->se_dev);
		return;
	}
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
}

static void transport_handle_queue_full(
	struct se_cmd *cmd,
	struct se_device *dev)
{
	spin_lock_irq(&dev->qf_cmd_lock);
	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
	atomic_inc(&dev->dev_qf_count);
	smp_mb__after_atomic_inc();
	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);

	schedule_work(&cmd->se_dev->qf_work_queue);
}

static void target_complete_ok_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	int reason = 0, ret;

	/*
	 * Check if we need to move delayed/dormant tasks from cmds on the
	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
	 * Attribute.
	 */
	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);
	/*
	 * Check to schedule QUEUE_FULL work, or execute an existing
	 * cmd->transport_qf_callback()
	 */
	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
		schedule_work(&cmd->se_dev->qf_work_queue);

	/*
	 * Check if we need to retrieve a sense buffer from
	 * the struct se_cmd in question.
	 */
	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		if (transport_get_sense_data(cmd) < 0)
			reason = TCM_NON_EXISTENT_LUN;

		/*
		 * Only set when a struct se_task->task_scsi_status returned
		 * a non GOOD status.
		 */
		if (cmd->scsi_status) {
			ret = transport_send_check_condition_and_sense(
					cmd, reason, 1);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop_to_fabric(cmd);
			return;
		}
	}
	/*
	 * Check for a callback, used by amongst other things
	 * XDWRITE_READ_10 emulation.
	 */
	if (cmd->transport_complete_callback)
		cmd->transport_complete_callback(cmd);

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		spin_lock(&cmd->se_lun->lun_sep_lock);
		if (cmd->se_lun->lun_sep) {
			cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
					cmd->data_length;
		}
		spin_unlock(&cmd->se_lun->lun_sep_lock);

		ret = cmd->se_tfo->queue_data_in(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	case DMA_TO_DEVICE:
		spin_lock(&cmd->se_lun->lun_sep_lock);
		if (cmd->se_lun->lun_sep) {
			cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
				cmd->data_length;
		}
		spin_unlock(&cmd->se_lun->lun_sep_lock);
		/*
		 * Check if we need to send READ payload for BIDI-COMMAND
		 */
		if (cmd->t_bidi_data_sg) {
			spin_lock(&cmd->se_lun->lun_sep_lock);
			if (cmd->se_lun->lun_sep) {
				cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
					cmd->data_length;
			}
			spin_unlock(&cmd->se_lun->lun_sep_lock);
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;
			break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	default:
		break;
	}

	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
		" data_direction: %d\n", cmd, cmd->data_direction);
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}

static void transport_free_dev_tasks(struct se_cmd *cmd)
{
	struct se_task *task, *task_tmp;
	unsigned long flags;
	LIST_HEAD(dispose_list);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_for_each_entry_safe(task, task_tmp,
				&cmd->t_task_list, t_list) {
		if (!(task->task_flags & TF_ACTIVE))
			list_move_tail(&task->t_list, &dispose_list);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	while (!list_empty(&dispose_list)) {
		task = list_first_entry(&dispose_list, struct se_task, t_list);

		if (task->task_sg != cmd->t_data_sg &&
		    task->task_sg != cmd->t_bidi_data_sg)
			kfree(task->task_sg);

		list_del(&task->t_list);

		cmd->se_dev->transport->free_task(task);
	}
}

static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, nents, count)
		__free_page(sg_page(sg));

	kfree(sgl);
}

static inline void transport_free_pages(struct se_cmd *cmd)
{
	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
		return;

	transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
	cmd->t_data_sg = NULL;
	cmd->t_data_nents = 0;

	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
	cmd->t_bidi_data_sg = NULL;
	cmd->t_bidi_data_nents = 0;
}

/**
 * transport_release_cmd - free a command
 * @cmd:       command to free
 *
 * This routine unconditionally frees a command, and reference counting
 * or list removal must be done in the caller.
 */
static void transport_release_cmd(struct se_cmd *cmd)
{
	BUG_ON(!cmd->se_tfo);

	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
		core_tmr_release_req(cmd->se_tmr_req);
	if (cmd->t_task_cdb != cmd->__t_task_cdb)
		kfree(cmd->t_task_cdb);
	/*
	 * If this cmd has been setup with target_get_sess_cmd(), drop
	 * the kref and call ->release_cmd() in kref callback.
	 */
	if (cmd->check_release != 0) {
		target_put_sess_cmd(cmd->se_sess, cmd);
		return;
	}
	cmd->se_tfo->release_cmd(cmd);
}

/**
 * transport_put_cmd - release a reference to a command
 * @cmd:       command to release
 *
 * This routine releases our reference to the command and frees it if possible.
 */
static void transport_put_cmd(struct se_cmd *cmd)
{
	unsigned long flags;
	int free_tasks = 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (atomic_read(&cmd->t_fe_count)) {
		if (!atomic_dec_and_test(&cmd->t_fe_count))
			goto out_busy;
	}

	if (atomic_read(&cmd->t_se_count)) {
		if (!atomic_dec_and_test(&cmd->t_se_count))
			goto out_busy;
	}

	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		transport_all_task_dev_remove_state(cmd);
		free_tasks = 1;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (free_tasks != 0)
		transport_free_dev_tasks(cmd);

	transport_free_pages(cmd);
	transport_release_cmd(cmd);
	return;
out_busy:
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

/*
 * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of
 * allocating in the core.
 * @cmd:  Associated se_cmd descriptor
 * @sgl:  SGL style memory for TCM WRITE / READ
 * @sgl_count: Number of SGL elements
 * @sgl_bidi: SGL style memory for TCM BIDI READ
 * @sgl_bidi_count: Number of BIDI READ SGL elements
 *
 * Return: nonzero if cmd was rejected for -ENOMEM or improper usage
 * of parameters.
 */
int transport_generic_map_mem_to_cmd(
	struct se_cmd *cmd,
	struct scatterlist *sgl,
	u32 sgl_count,
	struct scatterlist *sgl_bidi,
	u32 sgl_bidi_count)
{
	if (!sgl || !sgl_count)
		return 0;

	if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
	    (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
		/*
		 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
		 * scatterlists already have been set to follow what the fabric
		 * passes for the original expected data transfer length.
		 */
		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
			pr_warn("Rejecting SCSI DATA overflow for fabric using"
				" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
			return -EINVAL;
		}

		cmd->t_data_sg = sgl;
		cmd->t_data_nents = sgl_count;

		if (sgl_bidi && sgl_bidi_count) {
			cmd->t_bidi_data_sg = sgl_bidi;
			cmd->t_bidi_data_nents = sgl_bidi_count;
		}
		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	}

	return 0;
}
EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
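
/*
 * Illustrative sketch (not compiled in): a fabric that already owns
 * scatterlists, e.g. from a HW DMA engine, can map them into the
 * command instead of having transport_generic_get_mem() allocate pages.
 * my_fabric_map_data() is hypothetical.
 */
#if 0
static int my_fabric_map_data(struct se_cmd *se_cmd,
			struct scatterlist *sgl, u32 sgl_count)
{
	return transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
			NULL, 0);
}
#endif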

void *transport_kmap_data_sg(struct se_cmd *cmd)
{
	struct scatterlist *sg = cmd->t_data_sg;
	struct page **pages;
	int i;

	BUG_ON(!sg);
	/*
	 * We need to take into account a possible offset here for fabrics like
	 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
	 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
	 */
	if (!cmd->t_data_nents)
		return NULL;
	else if (cmd->t_data_nents == 1)
		return kmap(sg_page(sg)) + sg->offset;

	/* >1 page. use vmap */
	pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
	if (!pages)
		return NULL;

	/* convert sg[] to pages[] */
	for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
		pages[i] = sg_page(sg);
	}

	cmd->t_data_vmap = vmap(pages, cmd->t_data_nents,  VM_MAP, PAGE_KERNEL);
	kfree(pages);
	if (!cmd->t_data_vmap)
		return NULL;

	return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
3496
}
3497
EXPORT_SYMBOL(transport_kmap_data_sg);

void transport_kunmap_data_sg(struct se_cmd *cmd)
{
	if (!cmd->t_data_nents) {
		return;
	} else if (cmd->t_data_nents == 1) {
		/*
		 * Single-entry payloads were mapped with kmap(); return here
		 * so we never vunmap() a mapping that was never vmap()ed.
		 */
		kunmap(sg_page(cmd->t_data_sg));
		return;
	}

	vunmap(cmd->t_data_vmap);
	cmd->t_data_vmap = NULL;
}
EXPORT_SYMBOL(transport_kunmap_data_sg);
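
/*
 * Example (hypothetical): a caller needing a temporary linear view of the
 * payload brackets its access with the pair above.  Note that
 * transport_kmap_data_sg() can return NULL (zero-length payload or failed
 * allocation), so the mapping must be checked before use:
 *
 *	unsigned char *buf = transport_kmap_data_sg(se_cmd);
 *
 *	if (buf) {
 *		memset(buf, 0, se_cmd->data_length);
 *		transport_kunmap_data_sg(se_cmd);
 *	}
 */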

static int
transport_generic_get_mem(struct se_cmd *cmd)
{
	u32 length = cmd->data_length;
	unsigned int nents;
	struct page *page;
	gfp_t zero_flag;
	int i = 0;

	nents = DIV_ROUND_UP(length, PAGE_SIZE);
	cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
	if (!cmd->t_data_sg)
		return -ENOMEM;

	cmd->t_data_nents = nents;
	sg_init_table(cmd->t_data_sg, nents);

	zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB ? 0 : __GFP_ZERO;

	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);
		page = alloc_page(GFP_KERNEL | zero_flag);
		if (!page)
			goto out;

		sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}
	return 0;

out:
	/*
	 * Entry i is the one that failed to allocate, so only pages
	 * [0, i) need to be freed here.
	 */
	while (i > 0) {
		i--;
		__free_page(sg_page(&cmd->t_data_sg[i]));
	}
	kfree(cmd->t_data_sg);
	cmd->t_data_sg = NULL;
	return -ENOMEM;
}
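
/*
 * Worked example for transport_generic_get_mem() above, assuming 4 KB
 * pages: a 6144-byte payload gives nents = DIV_ROUND_UP(6144, 4096) = 2,
 * i.e. one full 4096-byte entry plus one 2048-byte entry, with each
 * length recorded via sg_set_page().
 */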

/* Reduce sectors if they are too long for the device */
static inline sector_t transport_limit_task_sectors(
	struct se_device *dev,
	unsigned long long lba,
	sector_t sectors)
{
	sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);

	if (dev->transport->get_device_type(dev) == TYPE_DISK)
		if ((lba + sectors) > transport_dev_end_lba(dev))
			sectors = ((transport_dev_end_lba(dev) - lba) + 1);

	return sectors;
}


/*
 * This function can be used by HW target mode drivers to create a linked
 * scatterlist from all contiguously allocated struct se_task->task_sg[].
 * This is intended to be called during the completion path by TCM Core
 * when struct target_core_fabric_ops->check_task_sg_chaining is enabled.
 */
void transport_do_task_sg_chain(struct se_cmd *cmd)
{
	struct scatterlist *sg_first = NULL;
	struct scatterlist *sg_prev = NULL;
	int sg_prev_nents = 0;
	struct scatterlist *sg;
	struct se_task *task;
	u32 chained_nents = 0;
	int i;

	BUG_ON(!cmd->se_tfo->task_sg_chaining);

	/*
	 * Walk the struct se_task list and setup scatterlist chains
	 * for each contiguously allocated struct se_task->task_sg[].
	 */
	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		if (!task->task_sg)
			continue;

		if (!sg_first) {
			sg_first = task->task_sg;
			chained_nents = task->task_sg_nents;
		} else {
			sg_chain(sg_prev, sg_prev_nents, task->task_sg);
			chained_nents += task->task_sg_nents;
		}
		/*
		 * For the padded tasks, use the extra SGL vector allocated
		 * in transport_allocate_data_tasks() for the sg_prev_nents
		 * offset into sg_chain() above.
		 *
		 * We do not need the padding for the last task (or a single
		 * task), but in that case we will never use the sg_prev_nents
		 * value below which would be incorrect.
		 */
		sg_prev_nents = (task->task_sg_nents + 1);
		sg_prev = task->task_sg;
	}
	/*
	 * Setup the starting pointer and total t_tasks_sg_linked_no including
	 * padding SGs for linking and to mark the end.
	 */
	cmd->t_tasks_sg_chained = sg_first;
	cmd->t_tasks_sg_chained_no = chained_nents;

	pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
		" t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
		cmd->t_tasks_sg_chained_no);

	for_each_sg(cmd->t_tasks_sg_chained, sg,
			cmd->t_tasks_sg_chained_no, i) {

		pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
			i, sg, sg_page(sg), sg->length, sg->offset);
		if (sg_is_chain(sg))
			pr_debug("SG: %p sg_is_chain=1\n", sg);
		if (sg_is_last(sg))
			pr_debug("SG: %p sg_is_last=1\n", sg);
	}
}
EXPORT_SYMBOL(transport_do_task_sg_chain);
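
/*
 * Example (hypothetical; hba_dev is an illustrative struct device pointer):
 * once the per-task SGLs are chained, an HW fabric driver that enabled
 * ->task_sg_chaining can map the whole command with a single DMA call
 * instead of one per se_task:
 *
 *	transport_do_task_sg_chain(se_cmd);
 *	count = dma_map_sg(hba_dev, se_cmd->t_tasks_sg_chained,
 *			   se_cmd->t_tasks_sg_chained_no,
 *			   se_cmd->data_direction);
 */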

/*
 * Break up cmd into chunks transport can handle
 */
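/*
 * Worked example (assuming a 512-byte block_size and max_sectors == 1024):
 * a 1 MB WRITE spans 2048 sectors, so DIV_ROUND_UP_SECTOR_T(2048, 1024)
 * yields task_count == 2 and two 512 KB se_tasks are built; the
 * single-task fast path below is only taken when the whole command fits
 * within max_sectors.
 */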
static int
transport_allocate_data_tasks(struct se_cmd *cmd,
	enum dma_data_direction data_direction,
	struct scatterlist *cmd_sg, unsigned int sgl_nents)
{
	struct se_device *dev = cmd->se_dev;
	int task_count, i;
	unsigned long long lba;
	sector_t sectors, dev_max_sectors;
	u32 sector_size;

	if (transport_cmd_get_valid_sectors(cmd) < 0)
		return -EINVAL;

	dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
	sector_size = dev->se_sub_dev->se_dev_attrib.block_size;

	WARN_ON(cmd->data_length % sector_size);

	lba = cmd->t_task_lba;
	sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
	task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);

	/*
	 * If we need just a single task reuse the SG list in the command
	 * and avoid a lot of work.
	 */
	if (task_count == 1) {
		struct se_task *task;
		unsigned long flags;

		task = transport_generic_get_task(cmd, data_direction);
		if (!task)
			return -ENOMEM;

		task->task_sg = cmd_sg;
		task->task_sg_nents = sgl_nents;

		task->task_lba = lba;
		task->task_sectors = sectors;
		task->task_size = task->task_sectors * sector_size;

		spin_lock_irqsave(&cmd->t_state_lock, flags);
		list_add_tail(&task->t_list, &cmd->t_task_list);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		return task_count;
	}

	for (i = 0; i < task_count; i++) {
		struct se_task *task;
		unsigned int task_size, task_sg_nents_padded;
		struct scatterlist *sg;
		unsigned long flags;
		int count;

		task = transport_generic_get_task(cmd, data_direction);
		if (!task)
			return -ENOMEM;

		task->task_lba = lba;
		task->task_sectors = min(sectors, dev_max_sectors);
		task->task_size = task->task_sectors * sector_size;

		/*
		 * This now assumes that passed sg_ents are in PAGE_SIZE chunks
		 * in order to calculate the number per task SGL entries
		 */
		task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);
		/*
		 * Check if the fabric module driver is requesting that all
		 * struct se_task->task_sg[] be chained together.  If so,
		 * then allocate an extra padding SG entry for linking and
		 * marking the end of the chained SGL for every task except
		 * the last one for (task_count > 1) operation, or skipping
		 * the extra padding for the (task_count == 1) case.
		 */
		if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
			task_sg_nents_padded = (task->task_sg_nents + 1);
		} else
			task_sg_nents_padded = task->task_sg_nents;

		task->task_sg = kmalloc(sizeof(struct scatterlist) *
					task_sg_nents_padded, GFP_KERNEL);
		if (!task->task_sg) {
			cmd->se_dev->transport->free_task(task);
			return -ENOMEM;
		}

		sg_init_table(task->task_sg, task_sg_nents_padded);

		task_size = task->task_size;

		/* Build new sgl, only up to task_size */
		for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
			if (cmd_sg->length > task_size)
				break;

			*sg = *cmd_sg;
			task_size -= cmd_sg->length;
			cmd_sg = sg_next(cmd_sg);
		}

		lba += task->task_sectors;
		sectors -= task->task_sectors;

		spin_lock_irqsave(&cmd->t_state_lock, flags);
		list_add_tail(&task->t_list, &cmd->t_task_list);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	}

	return task_count;
}

static int
transport_allocate_control_task(struct se_cmd *cmd)
{
	struct se_task *task;
	unsigned long flags;

	/* Workaround for handling zero-length control CDBs */
	if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
	    !cmd->data_length)
		return 0;

	task = transport_generic_get_task(cmd, cmd->data_direction);
	if (!task)
		return -ENOMEM;

	task->task_sg = cmd->t_data_sg;
	task->task_size = cmd->data_length;
	task->task_sg_nents = cmd->t_data_nents;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_add_tail(&task->t_list, &cmd->t_task_list);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/* Success! Return number of tasks allocated */
	return 1;
}

/*
 * Allocate any required resources to execute the command, and either place
 * it on the execution queue if possible.  For writes we might not have the
 * payload yet, thus notify the fabric via a call to ->write_pending instead.
 */
int transport_generic_new_cmd(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	int task_cdbs, task_cdbs_bidi = 0;
	int set_counts = 1;
	int ret = 0;

	/*
	 * Determine if the TCM fabric module has already allocated physical
	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
	 * beforehand.
	 */
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
	    cmd->data_length) {
		ret = transport_generic_get_mem(cmd);
		if (ret < 0)
			goto out_fail;
	}

	/*
	 * For BIDI commands set up the read tasks first.
	 */
	if (cmd->t_bidi_data_sg &&
	    dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB));

		task_cdbs_bidi = transport_allocate_data_tasks(cmd,
				DMA_FROM_DEVICE, cmd->t_bidi_data_sg,
				cmd->t_bidi_data_nents);
		if (task_cdbs_bidi <= 0)
			goto out_fail;

		atomic_inc(&cmd->t_fe_count);
		atomic_inc(&cmd->t_se_count);
		set_counts = 0;
	}

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
		task_cdbs = transport_allocate_data_tasks(cmd,
					cmd->data_direction, cmd->t_data_sg,
					cmd->t_data_nents);
	} else {
		task_cdbs = transport_allocate_control_task(cmd);
	}

	if (task_cdbs < 0)
		goto out_fail;
	else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
		spin_lock_irq(&cmd->t_state_lock);
		cmd->t_state = TRANSPORT_COMPLETE;
		cmd->transport_state |= CMD_T_ACTIVE;
		spin_unlock_irq(&cmd->t_state_lock);

		if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
			u8 ua_asc = 0, ua_ascq = 0;

			core_scsi3_ua_clear_for_request_sense(cmd,
					&ua_asc, &ua_ascq);
		}

		INIT_WORK(&cmd->work, target_complete_ok_work);
		queue_work(target_completion_wq, &cmd->work);
		return 0;
	}

	if (set_counts) {
		atomic_inc(&cmd->t_fe_count);
		atomic_inc(&cmd->t_se_count);
	}

	cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi);
	atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num);
	atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num);

	/*
	 * For WRITEs, let the fabric know its buffer is ready.
	 * This WRITE struct se_cmd (and all of its associated struct se_task's)
	 * will be added to the struct se_device execution queue after its WRITE
	 * data has arrived. (ie: It gets handled by the transport processing
	 * thread a second time)
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		transport_add_tasks_to_state_queue(cmd);
		return transport_generic_write_pending(cmd);
	}
	/*
	 * Everything else but a WRITE, add the struct se_cmd's struct se_task's
	 * to the execution queue.
	 */
	transport_execute_tasks(cmd);
	return 0;

out_fail:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	return -EINVAL;
}
EXPORT_SYMBOL(transport_generic_new_cmd);

/*	transport_generic_process_write():
 *
 *
 */
void transport_generic_process_write(struct se_cmd *cmd)
{
	transport_execute_tasks(cmd);
}
EXPORT_SYMBOL(transport_generic_process_write);

static void transport_write_pending_qf(struct se_cmd *cmd)
{
	int ret;

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM) {
		pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
			 cmd);
		transport_handle_queue_full(cmd, cmd->se_dev);
	}
}

static int transport_generic_write_pending(struct se_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->t_state = TRANSPORT_WRITE_PENDING;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * Clear the se_cmd for WRITE_PENDING status in order to set
	 * CMD_T_ACTIVE so that transport_generic_handle_data can be called
	 * from HW target mode interrupt code.  This is safe to be called
	 * with transport_off=1 before the cmd->se_tfo->write_pending
	 * because the se_cmd->se_lun pointer is not being cleared.
	 */
	transport_cmd_check_stop(cmd, 1, 0);

	/*
	 * Call the fabric write_pending function here to let the
	 * frontend know that WRITE buffers are ready.
	 */
	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;
	else if (ret < 0)
		return ret;

	return 1;

queue_full:
	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
	transport_handle_queue_full(cmd, cmd->se_dev);
	return 0;
}

void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
			transport_wait_for_tasks(cmd);

		transport_release_cmd(cmd);
	} else {
		if (wait_for_tasks)
			transport_wait_for_tasks(cmd);

		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);

		if (cmd->se_lun)
			transport_lun_remove_cmd(cmd);

		transport_free_dev_tasks(cmd);

		transport_put_cmd(cmd);
	}
}
EXPORT_SYMBOL(transport_generic_free_cmd);
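
/*
 * Example (hypothetical; my_fabric_resp_done is an illustrative callback):
 * a fabric's response-completion path typically ends up here once the
 * initiator has received the status, dropping the descriptor without a
 * second wait_for_tasks:
 *
 *	static void my_fabric_resp_done(struct my_fabric_cmd *my_cmd)
 *	{
 *		transport_generic_free_cmd(&my_cmd->se_cmd, 0);
 *	}
 */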

/* target_get_sess_cmd - Add command to active ->sess_cmd_list
 * @se_sess:	session to reference
 * @se_cmd:	command descriptor to add
 * @ack_kref:	Signal that fabric will perform an ack target_put_sess_cmd()
 */
void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
			bool ack_kref)
{
	unsigned long flags;

	kref_init(&se_cmd->cmd_kref);
	/*
	 * Add a second kref if the fabric caller is expecting to handle
	 * fabric acknowledgement that requires two target_put_sess_cmd()
	 * invocations before se_cmd descriptor release.
	 */
	if (ack_kref)
		kref_get(&se_cmd->cmd_kref);

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
	se_cmd->check_release = 1;
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_get_sess_cmd);

static void target_release_cmd_kref(struct kref *kref)
{
	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
	struct se_session *se_sess = se_cmd->se_sess;
	unsigned long flags;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	if (list_empty(&se_cmd->se_cmd_list)) {
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		WARN_ON(1);
		return;
	}
	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		complete(&se_cmd->cmd_wait_comp);
		return;
	}
	list_del(&se_cmd->se_cmd_list);
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

	se_cmd->se_tfo->release_cmd(se_cmd);
}

/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
 * @se_sess:	session to reference
 * @se_cmd:	command descriptor to drop
 */
int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
{
	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
}
EXPORT_SYMBOL(target_put_sess_cmd);
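
/*
 * Sketch of the kref pairing (hypothetical call sites): a fabric that
 * passed ack_kref == true to target_get_sess_cmd() must balance the two
 * references with two target_put_sess_cmd() calls, typically one at I/O
 * completion and one when the initiator acknowledges the response:
 *
 *	target_get_sess_cmd(se_sess, se_cmd, true);
 *	...
 *	target_put_sess_cmd(se_sess, se_cmd);	<- response sent
 *	...
 *	target_put_sess_cmd(se_sess, se_cmd);	<- fabric ack received
 */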

/* target_splice_sess_cmd_list - Split active cmds into sess_wait_list
 * @se_sess:	session to split
 */
void target_splice_sess_cmd_list(struct se_session *se_sess)
{
	struct se_cmd *se_cmd;
	unsigned long flags;

	WARN_ON(!list_empty(&se_sess->sess_wait_list));
	INIT_LIST_HEAD(&se_sess->sess_wait_list);

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	se_sess->sess_tearing_down = 1;

	list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);

	list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
		se_cmd->cmd_wait_set = 1;

	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_splice_sess_cmd_list);

/* target_wait_for_sess_cmds - Wait for outstanding descriptors
 * @se_sess:    session to wait for active I/O
 * @wait_for_tasks:	Make extra transport_wait_for_tasks call
 */
void target_wait_for_sess_cmds(
	struct se_session *se_sess,
	int wait_for_tasks)
{
	struct se_cmd *se_cmd, *tmp_cmd;
	bool rc = false;

	list_for_each_entry_safe(se_cmd, tmp_cmd,
				&se_sess->sess_wait_list, se_cmd_list) {
		list_del(&se_cmd->se_cmd_list);

		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
			" %d\n", se_cmd, se_cmd->t_state,
			se_cmd->se_tfo->get_cmd_state(se_cmd));

		if (wait_for_tasks) {
			pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));

			rc = transport_wait_for_tasks(se_cmd);

			pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));
		}

		if (!rc) {
			wait_for_completion(&se_cmd->cmd_wait_comp);
			pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));
		}

		se_cmd->se_tfo->release_cmd(se_cmd);
	}
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);
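
/*
 * Sketch of a fabric session shutdown sequence using the two helpers above
 * (assumes the fabric rejects or drains newly arriving commands between
 * the splice and the wait):
 *
 *	target_splice_sess_cmd_list(se_sess);
 *	...
 *	target_wait_for_sess_cmds(se_sess, 0);
 *	transport_deregister_session(se_sess);
 */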

/*	transport_lun_wait_for_tasks():
 *
 *	Called from ConfigFS context to stop the passed struct se_cmd to allow
 *	a struct se_lun to be successfully shut down.
 */
static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
{
	unsigned long flags;
	int ret;
	/*
	 * If the frontend has already requested this struct se_cmd to
	 * be stopped, we can safely ignore this struct se_cmd.
	 */
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_STOP) {
		cmd->transport_state &= ~CMD_T_LUN_STOP;

		pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
			 cmd->se_tfo->get_task_tag(cmd));
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		transport_cmd_check_stop(cmd, 1, 0);
		return -EPERM;
	}
	cmd->transport_state |= CMD_T_LUN_FE_STOP;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);

	ret = transport_stop_tasks_for_cmd(cmd);

	pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
			" %d\n", cmd, cmd->t_task_list_num, ret);
	if (!ret) {
		pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
				cmd->se_tfo->get_task_tag(cmd));
		wait_for_completion(&cmd->transport_lun_stop_comp);
		pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
				cmd->se_tfo->get_task_tag(cmd));
	}
	transport_remove_cmd_from_queue(cmd);

	return 0;
}

static void __transport_clear_lun_from_sessions(struct se_lun *lun)
{
	struct se_cmd *cmd = NULL;
	unsigned long lun_flags, cmd_flags;
	/*
	 * Do exception processing and return CHECK_CONDITION status to the
	 * Initiator Port.
	 */
	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	while (!list_empty(&lun->lun_cmd_list)) {
		cmd = list_first_entry(&lun->lun_cmd_list,
		       struct se_cmd, se_lun_node);
		list_del_init(&cmd->se_lun_node);

		/*
		 * This will notify iscsi_target_transport.c:
		 * transport_cmd_check_stop() that a LUN shutdown is in
		 * progress for the iscsi_cmd_t.
		 */
		spin_lock(&cmd->t_state_lock);
		pr_debug("SE_LUN[%d] - Setting cmd->transport"
			"_lun_stop for ITT: 0x%08x\n",
			cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));
		cmd->transport_state |= CMD_T_LUN_STOP;
		spin_unlock(&cmd->t_state_lock);

		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);

		if (!cmd->se_lun) {
			pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
				cmd->se_tfo->get_task_tag(cmd),
				cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
			BUG();
		}
		/*
		 * If the Storage engine still owns the iscsi_cmd_t, determine
		 * and/or stop its context.
		 */
		pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
			"_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));

		if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}

		pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
			"_wait_for_tasks(): SUCCESS\n",
			cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));

		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
		if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
			spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
			goto check_cond;
		}
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		transport_all_task_dev_remove_state(cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);

		transport_free_dev_tasks(cmd);
		/*
		 * The Storage engine stopped this struct se_cmd before it was
		 * sent to the fabric frontend for delivery back to the
		 * Initiator Node.  Return this SCSI CDB back with a
		 * CHECK_CONDITION status.
		 */
check_cond:
		transport_send_check_condition_and_sense(cmd,
				TCM_NON_EXISTENT_LUN, 0);
		/*
		 * If the fabric frontend is waiting for this iscsi_cmd_t to
		 * be released, notify the waiting thread now that LU has
		 * finished accessing it.
		 */
		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
		if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
			pr_debug("SE_LUN[%d] - Detected FE stop for"
				" struct se_cmd: %p ITT: 0x%08x\n",
				lun->unpacked_lun,
				cmd, cmd->se_tfo->get_task_tag(cmd));

			spin_unlock_irqrestore(&cmd->t_state_lock,
					cmd_flags);
			transport_cmd_check_stop(cmd, 1, 0);
			complete(&cmd->transport_lun_fe_stop_comp);
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}
		pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
			lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));

		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	}
	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
}

static int transport_clear_lun_thread(void *p)
{
	struct se_lun *lun = p;

	__transport_clear_lun_from_sessions(lun);
	complete(&lun->lun_shutdown_comp);

	return 0;
}

int transport_clear_lun_from_sessions(struct se_lun *lun)
{
	struct task_struct *kt;

	kt = kthread_run(transport_clear_lun_thread, lun,
			"tcm_cl_%u", lun->unpacked_lun);
	if (IS_ERR(kt)) {
		pr_err("Unable to start clear_lun thread\n");
		return PTR_ERR(kt);
	}
	wait_for_completion(&lun->lun_shutdown_comp);

	return 0;
}

/**
 * transport_wait_for_tasks - wait for completion to occur
 * @cmd:	command to wait
 *
 * Called from frontend fabric context to wait for storage engine
 * to pause and/or release frontend generated struct se_cmd.
 */
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}
	/*
	 * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
	 * has been set in transport_set_supported_SAM_opcode().
	 */
	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}
	/*
	 * If we are already stopped due to an external event (ie: LUN shutdown)
	 * sleep until the connection can have the passed struct se_cmd back.
	 * The cmd->transport_lun_fe_stop_comp will be completed by
	 * transport_clear_lun_from_sessions() once the ConfigFS context caller
	 * has completed its operation on the struct se_cmd.
	 */
	if (cmd->transport_state & CMD_T_LUN_STOP) {
		pr_debug("wait_for_tasks: Stopping"
			" wait_for_completion(&cmd->transport_lun_fe"
			"_stop_comp); for ITT: 0x%08x\n",
			cmd->se_tfo->get_task_tag(cmd));
		/*
		 * There is a special case for WRITES where a FE exception +
		 * LUN shutdown means ConfigFS context is still sleeping on
		 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
		 * We go ahead and up transport_lun_stop_comp just to be sure
		 * here.
		 */
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->transport_lun_stop_comp);
		wait_for_completion(&cmd->transport_lun_fe_stop_comp);
		spin_lock_irqsave(&cmd->t_state_lock, flags);

		transport_all_task_dev_remove_state(cmd);
		/*
		 * At this point, the frontend who was the originator of this
		 * struct se_cmd, now owns the structure and can be released through
		 * normal means below.
		 */
		pr_debug("wait_for_tasks: Stopped"
			" wait_for_completion(&cmd->transport_lun_fe_"
			"stop_comp); for ITT: 0x%08x\n",
			cmd->se_tfo->get_task_tag(cmd));

		cmd->transport_state &= ~CMD_T_LUN_STOP;
	}

	if (!(cmd->transport_state & CMD_T_ACTIVE) ||
	     (cmd->transport_state & CMD_T_ABORTED)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	cmd->transport_state |= CMD_T_STOP;

	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
		" i_state: %d, t_state: %d, CMD_T_STOP\n",
		cmd, cmd->se_tfo->get_task_tag(cmd),
		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);

	wait_for_completion(&cmd->t_transport_stop_comp);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);

	pr_debug("wait_for_tasks: Stopped wait_for_completion("
		"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
		cmd->se_tfo->get_task_tag(cmd));

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return true;
}
EXPORT_SYMBOL(transport_wait_for_tasks);

static int transport_get_sense_codes(
	struct se_cmd *cmd,
	u8 *asc,
	u8 *ascq)
{
	*asc = cmd->scsi_asc;
	*ascq = cmd->scsi_ascq;

	return 0;
}

static int transport_set_sense_codes(
	struct se_cmd *cmd,
	u8 asc,
	u8 ascq)
{
	cmd->scsi_asc = asc;
	cmd->scsi_ascq = ascq;

	return 0;
}

int transport_send_check_condition_and_sense(
	struct se_cmd *cmd,
	u8 reason,
	int from_transport)
{
	unsigned char *buffer = cmd->sense_buffer;
	unsigned long flags;
	int offset;
	u8 asc = 0, ascq = 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}
	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (!reason && from_transport)
		goto after_reason;

	if (!from_transport)
		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
	/*
	 * Data Segment and SenseLength of the fabric response PDU.
	 *
	 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
	 * from include/scsi/scsi_cmnd.h
	 */
	offset = cmd->se_tfo->set_fabric_sense_len(cmd,
				TRANSPORT_SENSE_BUFFER);
	/*
	 * Actual SENSE DATA, see SPC-3 7.23.2  SPC_SENSE_KEY_OFFSET uses
	 * SENSE KEY values from include/scsi/scsi.h
	 */
	switch (reason) {
	case TCM_NON_EXISTENT_LUN:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT NOT SUPPORTED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
		break;
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_SECTOR_COUNT_TOO_MANY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID COMMAND OPERATION CODE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
		break;
	case TCM_UNKNOWN_MODE_PAGE:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_CHECK_CONDITION_ABORT_CMD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* BUS DEVICE RESET FUNCTION OCCURRED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
		break;
	case TCM_INCORRECT_AMOUNT_OF_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* NOT ENOUGH UNSOLICITED DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
		break;
	case TCM_INVALID_CDB_FIELD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_INVALID_PARAMETER_LIST:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN PARAMETER LIST */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
		break;
	case TCM_UNEXPECTED_UNSOLICITED_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* UNEXPECTED_UNSOLICITED_DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
		break;
	case TCM_SERVICE_CRC_ERROR:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* PROTOCOL SERVICE CRC ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
		/* N/A */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
		break;
	case TCM_SNACK_REJECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* READ ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
		/* FAILED RETRANSMISSION REQUEST */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
		break;
	case TCM_WRITE_PROTECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* DATA PROTECT */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
		/* WRITE PROTECTED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
		break;
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* UNIT ATTENTION */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_CHECK_CONDITION_NOT_READY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* Not Ready */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
		transport_get_sense_codes(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	default:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT COMMUNICATION FAILURE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
		break;
	}
	/*
	 * This code uses linux/include/scsi/scsi.h SAM status codes!
	 */
	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	/*
	 * Automatically padded, this value is encoded in the fabric's
	 * data_length response PDU containing the SCSI defined sense data.
	 */
	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;

after_reason:
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
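
/*
 * Layout note for the fixed-format sense data built above (SPC-3 7.23.2),
 * relative to the fabric sense offset: byte 0 is the response code (0x70,
 * current error), byte SPC_SENSE_KEY_OFFSET (2) the sense key, byte
 * SPC_ADD_SENSE_LEN_OFFSET (7) the additional sense length (10), and bytes
 * SPC_ASC_KEY_OFFSET/SPC_ASCQ_KEY_OFFSET (12/13) the ASC/ASCQ pair.
 */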

int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
	int ret = 0;

	if (cmd->transport_state & CMD_T_ABORTED) {
		if (!send_status ||
		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
			return 1;
#if 0
		pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
			" status for CDB: 0x%02x ITT: 0x%08x\n",
			cmd->t_task_cdb[0],
			cmd->se_tfo->get_task_tag(cmd));
#endif
		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
		cmd->se_tfo->queue_status(cmd);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL(transport_check_aborted_status);

void transport_send_task_abort(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * If there are still expected incoming fabric WRITEs, we wait
	 * until they have completed before sending a TASK_ABORTED
	 * response.  This response with TASK_ABORTED status will be
	 * queued back to fabric module by transport_check_aborted_status().
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
			cmd->transport_state |= CMD_T_ABORTED;
			smp_mb__after_atomic_inc();
		}
	}
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
#if 0
	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
		" ITT: 0x%08x\n", cmd->t_task_cdb[0],
		cmd->se_tfo->get_task_tag(cmd));
#endif
	cmd->se_tfo->queue_status(cmd);
}

static int transport_generic_do_tmr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_tmr_req *tmr = cmd->se_tmr_req;
	int ret;

	switch (tmr->function) {
	case TMR_ABORT_TASK:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_ABORT_TASK_SET:
	case TMR_CLEAR_ACA:
	case TMR_CLEAR_TASK_SET:
		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		break;
	case TMR_LUN_RESET:
		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
					 TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_WARM_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_COLD_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	default:
		pr_err("Unknown TMR function: 0x%02x.\n",
				tmr->function);
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	}

	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
	cmd->se_tfo->queue_tm_rsp(cmd);

	transport_cmd_check_stop_to_fabric(cmd);
	return 0;
}

/*	transport_processing_thread():
 *
 *
 */
static int transport_processing_thread(void *param)
{
	int ret;
	struct se_cmd *cmd;
	struct se_device *dev = param;

	while (!kthread_should_stop()) {
		ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
				atomic_read(&dev->dev_queue_obj.queue_cnt) ||
				kthread_should_stop());
		if (ret < 0)
			goto out;

get_cmd:
		cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
		if (!cmd)
			continue;

		switch (cmd->t_state) {
		case TRANSPORT_NEW_CMD:
			BUG();
			break;
		case TRANSPORT_NEW_CMD_MAP:
			if (!cmd->se_tfo->new_cmd_map) {
				pr_err("cmd->se_tfo->new_cmd_map is"
					" NULL for TRANSPORT_NEW_CMD_MAP\n");
				BUG();
			}
			ret = cmd->se_tfo->new_cmd_map(cmd);
			if (ret < 0) {
				transport_generic_request_failure(cmd);
				break;
			}
			ret = transport_generic_new_cmd(cmd);
			if (ret < 0) {
				transport_generic_request_failure(cmd);
				break;
			}
			break;
		case TRANSPORT_PROCESS_WRITE:
			transport_generic_process_write(cmd);
			break;
		case TRANSPORT_PROCESS_TMR:
			transport_generic_do_tmr(cmd);
			break;
		case TRANSPORT_COMPLETE_QF_WP:
			transport_write_pending_qf(cmd);
			break;
		case TRANSPORT_COMPLETE_QF_OK:
			transport_complete_qf(cmd);
			break;
		default:
			pr_err("Unknown t_state: %d for ITT: 0x%08x "
				"i_state: %d on SE LUN: %u\n",
				cmd->t_state,
				cmd->se_tfo->get_task_tag(cmd),
				cmd->se_tfo->get_cmd_state(cmd),
				cmd->se_lun->unpacked_lun);
			BUG();
		}

		goto get_cmd;
	}

out:
	WARN_ON(!list_empty(&dev->state_task_list));
	WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
	dev->process_thread = NULL;
	return 0;
}