/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static int sub_api_initialized;

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;

static int transport_generic_write_pending(struct se_cmd *);
static int transport_processing_thread(void *param);
static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev);
static void transport_free_dev_tasks(struct se_cmd *cmd);
static int transport_generic_get_mem(struct se_cmd *cmd);
static void transport_put_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session"
				" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration"
				" failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
				" failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
				"cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
			"t10_alua_tg_pt_gp_mem_cache",
			sizeof(struct t10_alua_tg_pt_gp_member),
			__alignof__(struct t10_alua_tg_pt_gp_member),
			0, NULL);
	if (!t10_alua_tg_pt_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"mem_t failed\n");
		goto out_free_tg_pt_gp_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_tg_pt_gp_mem_cache;

	return 0;

out_free_tg_pt_gp_mem_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}
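/*
 * Usage sketch (editor's illustration, not part of the original source):
 * callers hand in one of the scsi_index_t values to obtain a unique index,
 * as transport_add_device_to_core_hba() does further below:
 *
 *	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
 */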

static void transport_init_queue_obj(struct se_queue_obj *qobj)
{
	atomic_set(&qobj->queue_cnt, 0);
	INIT_LIST_HEAD(&qobj->qobj_list);
	init_waitqueue_head(&qobj->thread_wq);
	spin_lock_init(&qobj->cmd_queue_lock);
}

void transport_subsystem_check_init(void)
{
	int ret;

	if (sub_api_initialized)
		return;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = request_module("target_core_stgt");
	if (ret != 0)
		pr_err("Unable to load target_core_stgt\n");

	sub_api_initialized = 1;
	return;
}

struct se_session *transport_init_session(void)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from"
				" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	INIT_LIST_HEAD(&se_sess->sess_wait_list);
	spin_lock_init(&se_sess->sess_cmd_lock);

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);

/*
 * Called with spin_lock_bh(&se_tpg->session_lock) held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}
		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	spin_lock_bh(&se_tpg->session_lock);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_bh(&se_tpg->session_lock);
}
EXPORT_SYMBOL(transport_register_session);
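/*
 * Fabric-side lifecycle sketch (editor's illustration, not part of the
 * original source; "demo_tpg", "demo_nacl" and "demo_conn" are hypothetical
 * fabric objects):
 *
 *	struct se_session *sess = transport_init_session();
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 *	transport_register_session(demo_tpg, demo_nacl, sess, demo_conn);
 *	...
 *	transport_deregister_session(sess);
 */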

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		list_del(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	struct se_node_acl *se_nacl;
	unsigned long flags;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	/*
	 * Determine if we need to do extra work for this initiator node's
	 * struct se_node_acl if it had been previously dynamically generated.
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
		if (se_nacl->dynamic_node_acl) {
			if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
					se_tpg)) {
				list_del(&se_nacl->acl_list);
				se_tpg->num_node_acls--;
				spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);

				core_tpg_wait_for_nacl_pr_ref(se_nacl);
				core_free_device_list_for_node(se_nacl, se_tpg);
				se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
						se_nacl);
				spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
			}
		}
		spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
	}

	transport_free_session(se_sess);

	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		se_tpg->se_tpg_tfo->get_fabric_name());
}
EXPORT_SYMBOL(transport_deregister_session);

/*
 * Called with cmd->t_state_lock held.
 */
static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_task *task;
	unsigned long flags;

	if (!dev)
		return;

	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		if (task->task_flags & TF_ACTIVE)
			continue;

		spin_lock_irqsave(&dev->execute_task_lock, flags);
		if (task->t_state_active) {
			pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
				cmd->se_tfo->get_task_tag(cmd), dev, task);

			list_del(&task->t_state_list);
			atomic_dec(&cmd->t_task_cdbs_ex_left);
			task->t_state_active = false;
		}
		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
	}

}

/*	transport_cmd_check_stop():
 *
 *	'transport_off = 1' determines if CMD_T_ACTIVE should be cleared.
 *	'transport_off = 2' determines if task_dev_state should be removed.
 *
 *	A non-zero u8 t_state sets cmd->t_state.
 *	Returns 1 when command is stopped, else 0.
 */
static int transport_cmd_check_stop(
	struct se_cmd *cmd,
	int transport_off,
	u8 t_state)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	/*
	 * Determine if IOCTL context caller is requesting the stopping of this
	 * command for LUN shutdown purposes.
	 */
	if (cmd->transport_state & CMD_T_LUN_STOP) {
		pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
			__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));

		cmd->transport_state &= ~CMD_T_ACTIVE;
		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->transport_lun_stop_comp);
		return 1;
	}
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
			__func__, __LINE__,
			cmd->se_tfo->get_task_tag(cmd));

		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);

		/*
		 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
		 * to FE.
		 */
		if (transport_off == 2)
			cmd->se_lun = NULL;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->t_transport_stop_comp);
		return 1;
	}
	if (transport_off) {
		cmd->transport_state &= ~CMD_T_ACTIVE;
		if (transport_off == 2) {
			transport_all_task_dev_remove_state(cmd);
			/*
			 * Clear struct se_cmd->se_lun before the transport_off == 2
			 * handoff to fabric module.
			 */
			cmd->se_lun = NULL;
			/*
			 * Some fabric modules like tcm_loop can release
			 * their internally allocated I/O reference and
			 * struct se_cmd now.
			 *
			 * Fabric modules are expected to return '1' here if the
			 * se_cmd being passed is released at this point,
			 * or zero if not being released.
			 */
			if (cmd->se_tfo->check_stop_free != NULL) {
				spin_unlock_irqrestore(
					&cmd->t_state_lock, flags);

				return cmd->se_tfo->check_stop_free(cmd);
			}
		}
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		return 0;
	} else if (t_state)
		cmd->t_state = t_state;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return 0;
}

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	return transport_cmd_check_stop(cmd, 2, 0);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;
	unsigned long flags;

	if (!lun)
		return;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		transport_all_task_dev_remove_state(cmd);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
	if (!list_empty(&cmd->se_lun_node))
		list_del_init(&cmd->se_lun_node);
	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
}

void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
		transport_lun_remove_cmd(cmd);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
	if (remove) {
		transport_remove_cmd_from_queue(cmd);
		transport_put_cmd(cmd);
	}
}

static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
		bool at_head)
{
	struct se_device *dev = cmd->se_dev;
	struct se_queue_obj *qobj = &dev->dev_queue_obj;
	unsigned long flags;

	if (t_state) {
		spin_lock_irqsave(&cmd->t_state_lock, flags);
		cmd->t_state = t_state;
		cmd->transport_state |= CMD_T_ACTIVE;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	}

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);

	/* If the cmd is already on the list, remove it before we add it */
	if (!list_empty(&cmd->se_queue_node))
		list_del(&cmd->se_queue_node);
	else
		atomic_inc(&qobj->queue_cnt);

	if (at_head)
		list_add(&cmd->se_queue_node, &qobj->qobj_list);
	else
		list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
	cmd->transport_state |= CMD_T_QUEUED;
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	wake_up_interruptible(&qobj->thread_wq);
}

static struct se_cmd *
transport_get_cmd_from_queue(struct se_queue_obj *qobj)
{
	struct se_cmd *cmd;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (list_empty(&qobj->qobj_list)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return NULL;
	}
	cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);

	cmd->transport_state &= ~CMD_T_QUEUED;
	list_del_init(&cmd->se_queue_node);
	atomic_dec(&qobj->queue_cnt);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	return cmd;
}

static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
{
	struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (!(cmd->transport_state & CMD_T_QUEUED)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return;
	}
	cmd->transport_state &= ~CMD_T_QUEUED;
	atomic_dec(&qobj->queue_cnt);
	list_del_init(&cmd->se_queue_node);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
}

/*
 * Completion function used by TCM subsystem plugins (such as FILEIO)
 * for queueing up response from struct se_subsystem_api->do_task()
 */
void transport_complete_sync_cache(struct se_cmd *cmd, int good)
{
	struct se_task *task = list_entry(cmd->t_task_list.next,
				struct se_task, t_list);

	if (good) {
		cmd->scsi_status = SAM_STAT_GOOD;
		task->task_scsi_status = GOOD;
	} else {
		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
		task->task_se_cmd->scsi_sense_reason =
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	}

	transport_complete_task(task, good);
}
EXPORT_SYMBOL(transport_complete_sync_cache);
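/*
 * Backend usage sketch (editor's illustration, not part of the original
 * source; "fd_dev" is a hypothetical FILEIO-style device): a plugin's
 * ->do_task() for SYNCHRONIZE_CACHE would flush its backing file and then
 * complete the command, roughly:
 *
 *	int ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
 *	transport_complete_sync_cache(cmd, ret == 0);
 */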

static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd);
}

/*	transport_complete_task():
 *
 *	Called from interrupt and non-interrupt context depending
 *	on the transport plugin.
 */
void transport_complete_task(struct se_task *task, int success)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	task->task_flags &= ~TF_ACTIVE;

	/*
	 * See if any sense data exists, if so set the TASK_SENSE flag.
	 * Also check for any other post completion work that needs to be
	 * done by the plugins.
	 */
	if (dev && dev->transport->transport_complete) {
		if (dev->transport->transport_complete(task) != 0) {
			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
			task->task_flags |= TF_HAS_SENSE;
			success = 1;
		}
	}

	/*
	 * See if we are waiting for outstanding struct se_task
	 * to complete for an exception condition
	 */
	if (task->task_flags & TF_REQUEST_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&task->task_stop_comp);
		return;
	}

	if (!success)
		cmd->transport_state |= CMD_T_FAILED;

	/*
	 * Decrement the outstanding t_task_cdbs_left count.  The last
	 * struct se_task from struct se_cmd will complete itself into the
	 * device queue depending upon int success.
	 */
	if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	/*
	 * Check for case where an explicit ABORT_TASK has been received
	 * and transport_wait_for_tasks() will be waiting for completion..
	 */
	if (cmd->transport_state & CMD_T_ABORTED &&
	    cmd->transport_state & CMD_T_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->t_transport_stop_comp);
		return;
	} else if (cmd->transport_state & CMD_T_FAILED) {
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		INIT_WORK(&cmd->work, target_complete_failure_work);
	} else {
		INIT_WORK(&cmd->work, target_complete_ok_work);
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(transport_complete_task);
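/*
 * Backend usage sketch (editor's illustration, not part of the original
 * source; "demo_bio_done" is hypothetical): a block-backed plugin completes
 * each struct se_task from its I/O completion callback, which may run in
 * interrupt context:
 *
 *	static void demo_bio_done(struct bio *bio, int err)
 *	{
 *		struct se_task *task = bio->bi_private;
 *
 *		bio_put(bio);
 *		transport_complete_task(task, err == 0);
 *	}
 */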

/*
 * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
 * struct se_task list is ready to be added to the active execution list
 * of a struct se_device.
 *
 * Called with se_device->execute_task_lock held.
 */
static inline int transport_add_task_check_sam_attr(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	/*
	 * No SAM Task attribute emulation enabled, add to tail of
	 * execution queue
	 */
	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
		list_add_tail(&task->t_execute_list, &dev->execute_task_list);
		return 0;
	}
	/*
	 * HEAD_OF_QUEUE attribute for received CDB, which means
	 * the first task that is associated with a struct se_cmd goes to
	 * head of the struct se_device->execute_task_list, and task_prev
	 * after that for each subsequent task
	 */
	if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
		list_add(&task->t_execute_list,
				(task_prev != NULL) ?
				&task_prev->t_execute_list :
				&dev->execute_task_list);

		pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
				" in execution queue\n",
				task->task_se_cmd->t_task_cdb[0]);
		return 1;
	}
	/*
	 * For ORDERED, SIMPLE or UNTAGGED attribute tasks once they have been
	 * transitioned from Dormant -> Active state, and are added to the end
	 * of the struct se_device->execute_task_list
	 */
	list_add_tail(&task->t_execute_list, &dev->execute_task_list);
	return 0;
}

/*	__transport_add_task_to_execute_queue():
 *
 *	Called with se_device->execute_task_lock held.
 */
static void __transport_add_task_to_execute_queue(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	int head_of_queue;

	head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
	atomic_inc(&dev->execute_tasks);

	if (task->t_state_active)
		return;
	/*
	 * Determine if this task needs to go to HEAD_OF_QUEUE for the
	 * state list as well.  Running with SAM Task Attribute emulation
	 * will always return head_of_queue == 0 here
	 */
	if (head_of_queue)
		list_add(&task->t_state_list, (task_prev) ?
				&task_prev->t_state_list :
				&dev->state_task_list);
	else
		list_add_tail(&task->t_state_list, &dev->state_task_list);

	task->t_state_active = true;

	pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
		task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
		task, dev);
}

static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_task *task;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		spin_lock(&dev->execute_task_lock);
		if (!task->t_state_active) {
			list_add_tail(&task->t_state_list,
				      &dev->state_task_list);
			task->t_state_active = true;

			pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
				task->task_se_cmd->se_tfo->get_task_tag(
				task->task_se_cmd), task, dev);
		}
		spin_unlock(&dev->execute_task_lock);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

static void __transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_task *task, *task_prev = NULL;

	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		if (!list_empty(&task->t_execute_list))
			continue;
		/*
		 * __transport_add_task_to_execute_queue() handles the
		 * SAM Task Attribute emulation if enabled
		 */
		__transport_add_task_to_execute_queue(task, task_prev, dev);
		task_prev = task;
	}
}

static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
	unsigned long flags;
	struct se_device *dev = cmd->se_dev;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	__transport_add_tasks_from_cmd(cmd);
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

void __transport_remove_task_from_execute_queue(struct se_task *task,
		struct se_device *dev)
{
	list_del_init(&task->t_execute_list);
	atomic_dec(&dev->execute_tasks);
}

static void transport_remove_task_from_execute_queue(
	struct se_task *task,
	struct se_device *dev)
{
	unsigned long flags;

	if (WARN_ON(list_empty(&task->t_execute_list)))
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	__transport_remove_task_from_execute_queue(task, dev);
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 */

static void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
					qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec(&dev->dev_qf_count);
		smp_mb__after_atomic_dec();

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			: "UNKNOWN");

		transport_add_cmd_to_queue(cmd, cmd->t_state, true);
	}
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	switch (dev->dev_status) {
	case TRANSPORT_DEVICE_ACTIVATED:
		*bl += sprintf(b + *bl, "ACTIVATED");
		break;
	case TRANSPORT_DEVICE_DEACTIVATED:
		*bl += sprintf(b + *bl, "DEACTIVATED");
		break;
	case TRANSPORT_DEVICE_SHUTDOWN:
		*bl += sprintf(b + *bl, "SHUTDOWN");
		break;
	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
		*bl += sprintf(b + *bl, "OFFLINE");
		break;
	default:
		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
		break;
	}

	*bl += sprintf(b + *bl, "  Execute/Max Queue Depth: %d/%d",
		atomic_read(&dev->execute_tasks), dev->queue_depth);
	*bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
		dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
	*bl += sprintf(b + *bl, "        ");
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access"
				" Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport"
				" Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set..
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	 if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
				hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
				hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);
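/*
 * Usage sketch (editor's illustration, not part of the original source):
 * consumers walk the INQUIRY VPD page 0x83 designator list and feed each
 * descriptor through the helpers above, roughly:
 *
 *	struct t10_vpd *vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
 *
 *	transport_set_vpd_proto_id(vpd, page_83);
 *	transport_set_vpd_assoc(vpd, page_83);
 *	transport_set_vpd_ident_type(vpd, page_83);
 *	transport_set_vpd_ident(vpd, page_83);
 */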

static void core_setup_task_attr_emulation(struct se_device *dev)
{
	/*
	 * If this device is from Target_Core_Mod/pSCSI, disable the
	 * SAM Task Attribute emulation.
	 *
	 * This is currently not available in upstream Linux/SCSI Target
	 * mode code, and is assumed to be disabled while using TCM/pSCSI.
	 */
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
		return;
	}

	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
	pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
		" device\n", dev->transport->name,
		dev->transport->get_device_rev(dev));
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
	pr_debug("                 ANSI SCSI revision: %02x\n",
				dev->transport->get_device_rev(dev));
}

struct se_device *transport_add_device_to_core_hba(
	struct se_hba *hba,
	struct se_subsystem_api *transport,
	struct se_subsystem_dev *se_dev,
	u32 device_flags,
	void *transport_dev,
	struct se_dev_limits *dev_limits,
	const char *inquiry_prod,
	const char *inquiry_rev)
{
	int force_pt;
	struct se_device  *dev;

	dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
	if (!dev) {
		pr_err("Unable to allocate memory for se_dev_t\n");
		return NULL;
	}

	transport_init_queue_obj(&dev->dev_queue_obj);
	dev->dev_flags		= device_flags;
	dev->dev_status		|= TRANSPORT_DEVICE_DEACTIVATED;
	dev->dev_ptr		= transport_dev;
	dev->se_hba		= hba;
	dev->se_sub_dev		= se_dev;
	dev->transport		= transport;
	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->execute_task_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_task_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->dev_status_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	atomic_set(&dev->dev_ordered_id, 0);

	se_dev_set_default_attribs(dev, dev_limits);

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();
	spin_lock_init(&dev->stats_lock);

	spin_lock(&hba->device_lock);
	list_add_tail(&dev->dev_list, &hba->hba_dev_list);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);
	/*
	 * Setup the SAM Task Attribute emulation for struct se_device
	 */
	core_setup_task_attr_emulation(dev);
	/*
	 * Force PR and ALUA passthrough emulation with internal object use.
	 */
	force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
	/*
	 * Setup the Reservations infrastructure for struct se_device
	 */
	core_setup_reservations(dev, force_pt);
	/*
	 * Setup the Asymmetric Logical Unit Assignment for struct se_device
	 */
	if (core_setup_alua(dev, force_pt) < 0)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->process_thread = kthread_run(transport_processing_thread, dev,
					  "LIO_%s", dev->transport->name);
	if (IS_ERR(dev->process_thread)) {
		pr_err("Unable to create kthread: LIO_%s\n",
			dev->transport->name);
		goto out;
	}
	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 * This is required so that transport_get_inquiry() copies these
	 * originals once back into DEV_T10_WWN(dev) for the virtual device
	 * setup.
	 */
	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (!inquiry_prod || !inquiry_rev) {
			pr_err("All non TCM/pSCSI plugins require"
				" INQUIRY consts\n");
			goto out;
		}

		strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
		strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
	}
	scsi_dump_inquiry(dev);

	return dev;
out:
	kthread_stop(dev->process_thread);

	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	se_release_vpd_for_dev(dev);

	kfree(dev);

	return NULL;
}
EXPORT_SYMBOL(transport_add_device_to_core_hba);
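/*
 * Backend usage sketch (editor's illustration, not part of the original
 * source; "demo_template" and "demo_dev" are hypothetical): a subsystem
 * plugin's ->create_virtdevice() typically finishes with:
 *
 *	dev = transport_add_device_to_core_hba(hba, &demo_template, se_dev,
 *			dev_flags, demo_dev, &dev_limits, "DEMO", "1.0");
 *	if (!dev)
 *		goto fail;
 */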

/*	transport_generic_prepare_cdb():
 *
 *	Since the Initiator sees iSCSI devices as LUNs,  the SCSI CDB will
 *	contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
 *	The point of this is since we are mapping iSCSI LUNs to
 *	SCSI Target IDs having a non-zero LUN in the CDB will throw the
 *	devices and HBAs for a loop.
 */
static inline void transport_generic_prepare_cdb(
	unsigned char *cdb)
{
	switch (cdb[0]) {
	case READ_10: /* SBC - RDProtect */
	case READ_12: /* SBC - RDProtect */
	case READ_16: /* SBC - RDProtect */
	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
	case VERIFY: /* SBC - VRProtect */
	case VERIFY_16: /* SBC - VRProtect */
	case WRITE_VERIFY: /* SBC - VRProtect */
	case WRITE_VERIFY_12: /* SBC - VRProtect */
		break;
	default:
		cdb[1] &= 0x1f; /* clear logical unit number */
		break;
	}
}

static struct se_task *
transport_generic_get_task(struct se_cmd *cmd,
		enum dma_data_direction data_direction)
{
	struct se_task *task;
	struct se_device *dev = cmd->se_dev;

	task = dev->transport->alloc_task(cmd->t_task_cdb);
	if (!task) {
		pr_err("Unable to allocate struct se_task\n");
		return NULL;
	}

	INIT_LIST_HEAD(&task->t_list);
	INIT_LIST_HEAD(&task->t_execute_list);
	INIT_LIST_HEAD(&task->t_state_list);
	init_completion(&task->task_stop_comp);
	task->task_se_cmd = cmd;
	task->task_data_direction = data_direction;

	return task;
}

static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);

/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
	struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer)
{
	INIT_LIST_HEAD(&cmd->se_lun_node);
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_queue_node);
	INIT_LIST_HEAD(&cmd->se_cmd_list);
	INIT_LIST_HEAD(&cmd->t_task_list);
	init_completion(&cmd->transport_lun_fe_stop_comp);
	init_completion(&cmd->transport_lun_stop_comp);
	init_completion(&cmd->t_transport_stop_comp);
	init_completion(&cmd->cmd_wait_comp);
	spin_lock_init(&cmd->t_state_lock);
	cmd->transport_state = CMD_T_DEV_ACTIVE;

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;
}
EXPORT_SYMBOL(transport_init_se_cmd);

static int transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 0;

	if (cmd->sam_task_attr == MSG_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA"
			" emulation is not supported\n");
		return -EINVAL;
	}
	/*
	 * Used to determine when ORDERED commands should go from
	 * Dormant to Active status.
	 */
	cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
	smp_mb__after_atomic_inc();
	pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
			cmd->se_ordered_id, cmd->sam_task_attr,
			cmd->se_dev->transport->name);
	return 0;
}

/*	transport_generic_allocate_tasks():
 *
 *	Called from fabric RX Thread.
 */
int transport_generic_allocate_tasks(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	int ret;

	transport_generic_prepare_cdb(cdb);
	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now..  Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
						GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb"
				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(cmd->__t_task_cdb));
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason =
					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
			return -ENOMEM;
		}
	} else
		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Copy the original CDB into cmd->t_task_cdb.
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
	/*
	 * Setup the received CDB based on SCSI defined opcodes and
	 * perform unit attention, persistent reservations and ALUA
	 * checks for virtual device backends.  The cmd->t_task_cdb
	 * pointer is expected to be setup before we reach this point.
	 */
	ret = transport_generic_cmd_sequencer(cmd, cdb);
	if (ret < 0)
		return ret;
	/*
	 * Check for SAM Task Attribute Emulation
	 */
	if (transport_check_alloc_task_attr(cmd) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}
	spin_lock(&cmd->se_lun->lun_sep_lock);
	if (cmd->se_lun->lun_sep)
		cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
	spin_unlock(&cmd->se_lun->lun_sep_lock);
	return 0;
}
EXPORT_SYMBOL(transport_generic_allocate_tasks);

/*
 * Used by fabric module frontends to queue tasks directly.
 * May only be used from process context.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	int ret;

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}
	if (in_interrupt()) {
		dump_stack();
		pr_err("transport_generic_handle_cdb cannot be called"
				" from interrupt context\n");
		return -EINVAL;
	}
	/*
	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE following
	 * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
	 * in existing usage to ensure that outstanding descriptors are handled
	 * correctly during shutdown via transport_wait_for_tasks()
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	cmd->transport_state |= CMD_T_ACTIVE;

	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary..
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret < 0)
		transport_generic_request_failure(cmd);

	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);

1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637
/**
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_addr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 **/
1638
void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670
		unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{
	struct se_portal_group *se_tpg;
	int rc;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
	BUG_ON(in_interrupt());
	/*
	 * Initialize se_cmd for target operation.  From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
				data_length, data_dir, task_attr, sense);
	/*
	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
	 * se_sess->sess_cmd_list.  A second kref_get here is necessary
	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
	 */
	target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
	/*
	 * Signal bidirectional data payloads to target-core
	 */
	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;
	/*
	 * Locate se_lun pointer and attach it to struct se_cmd
	 */
	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
		transport_send_check_condition_and_sense(se_cmd,
				se_cmd->scsi_sense_reason, 0);
		target_put_sess_cmd(se_sess, se_cmd);
		return;
	}
	/*
	 * Sanitize CDBs via transport_generic_cmd_sequencer() and
	 * allocate the necessary tasks to complete the received CDB+data
	 */
	rc = transport_generic_allocate_tasks(se_cmd, cdb);
	if (rc != 0) {
		transport_generic_request_failure(se_cmd);
		return;
	}
	/*
	 * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
	 * for immediate execution of READs, otherwise wait for
	 * transport_generic_handle_data() to be called for WRITEs
	 * when fabric has filled the incoming buffer.
	 */
	transport_handle_cdb_direct(se_cmd);
	return;
}
EXPORT_SYMBOL(target_submit_cmd);
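/*
 * Fabric usage sketch (editor's illustration, not part of the original
 * source; the "demo" names are hypothetical): a fabric driver submits a
 * freshly received READ command in a single call:
 *
 *	target_submit_cmd(&demo_cmd->se_cmd, demo_sess, cdb,
 *			  demo_cmd->sense_buf, unpacked_lun, data_length,
 *			  MSG_SIMPLE_TAG, DMA_FROM_DEVICE, 0);
 */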

/**
 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
 *                     for TMR CDBs
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @fabric_tmr_ptr: fabric context for TMR req
 * @tm_type: Type of TM request
 * @flags: submit cmd flags
 *
 * Callable from all contexts.
 **/

int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u32 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type, int flags)
{
	struct se_portal_group *se_tpg;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);

	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			      0, DMA_NONE, MSG_SIMPLE_TAG, sense);
	/*
	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
	 * allocation failure.
	 */
	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, GFP_KERNEL);
	if (ret < 0)
		return -ENOMEM;

	/* See target_submit_cmd for commentary */
	target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));

	ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
	if (ret) {
		se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
		se_cmd->se_tfo->queue_tm_rsp(se_cmd);
		transport_generic_free_cmd(se_cmd, 0);
		return 0;
	}
	transport_generic_handle_tmr(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_tmr);
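/*
 * Fabric usage sketch (editor's illustration, not part of the original
 * source; the "demo" names are hypothetical): submitting a LUN_RESET with
 * fabric-provided sense space:
 *
 *	if (target_submit_tmr(&demo_cmd->se_cmd, demo_sess,
 *			      demo_cmd->sense_buf, unpacked_lun,
 *			      demo_tmr_ctx, TMR_LUN_RESET, 0) < 0)
 *		goto reject;
 */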

/*
 * Used by fabric module frontends defining a TFO->new_cmd_map() caller
 * to  queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
 * complete setup in TCM process context w/ TFO->new_cmd_map().
 */
int transport_generic_handle_cdb_map(
	struct se_cmd *cmd)
{
	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}

	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_cdb_map);

/*	transport_generic_handle_data():
 *
 *
 */
int transport_generic_handle_data(
	struct se_cmd *cmd)
{
	/*
	 * For the software fabric case, we assume the nexus is being
	 * failed/shutdown when signals are pending from the kthread context
	 * caller, so we return a failure.  For the HW target mode case running
	 * in interrupt code, the signal_pending() check is skipped.
	 */
	if (!in_interrupt() && signal_pending(current))
		return -EPERM;
	/*
	 * If the received CDB has already been ABORTED by the generic
	 * target engine, we now call transport_check_aborted_status()
	 * to queue any delayed TASK_ABORTED status for the received CDB to the
	 * fabric module as we are expecting no further incoming DATA OUT
	 * sequences at this point.
	 */
	if (transport_check_aborted_status(cmd, 1) != 0)
		return 0;

	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_data);

/*	transport_generic_handle_tmr():
 *
 *
 */
int transport_generic_handle_tmr(
	struct se_cmd *cmd)
{
	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);

/*
 * If the task is active, request it to be stopped and sleep until it
 * has completed.
 */
bool target_stop_task(struct se_task *task, unsigned long *flags)
{
	struct se_cmd *cmd = task->task_se_cmd;
	bool was_active = false;

	if (task->task_flags & TF_ACTIVE) {
		task->task_flags |= TF_REQUEST_STOP;
		spin_unlock_irqrestore(&cmd->t_state_lock, *flags);

		pr_debug("Task %p waiting to complete\n", task);
		wait_for_completion(&task->task_stop_comp);
		pr_debug("Task %p stopped successfully\n", task);

		spin_lock_irqsave(&cmd->t_state_lock, *flags);
		atomic_dec(&cmd->t_task_cdbs_left);
		task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP);
		was_active = true;
	}

	return was_active;
}

static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
{
	struct se_task *task, *task_tmp;
	unsigned long flags;
	int ret = 0;

	pr_debug("ITT[0x%08x] - Stopping tasks\n",
		cmd->se_tfo->get_task_tag(cmd));

	/*
	 * No tasks remain in the execution queue
	 */
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_for_each_entry_safe(task, task_tmp,
				&cmd->t_task_list, t_list) {
		pr_debug("Processing task %p\n", task);
		/*
		 * If the struct se_task has not been sent and is not active,
		 * remove the struct se_task from the execution queue.
		 */
		if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) {
			spin_unlock_irqrestore(&cmd->t_state_lock,
					flags);
			transport_remove_task_from_execute_queue(task,
					cmd->se_dev);

			pr_debug("Task %p removed from execute queue\n", task);
			spin_lock_irqsave(&cmd->t_state_lock, flags);
			continue;
		}

		if (!target_stop_task(task, &flags)) {
			pr_debug("Task %p - did nothing\n", task);
			ret++;
		}
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return ret;
}

/*
 * Handle SAM-esque emulation for generic transport request failures.
 */
void transport_generic_request_failure(struct se_cmd *cmd)
{
	int ret = 0;

	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
		cmd->t_task_cdb[0]);
	pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
		cmd->se_tfo->get_cmd_state(cmd),
		cmd->t_state, cmd->scsi_sense_reason);
	pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
		" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
		" CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
		cmd->t_task_list_num,
		atomic_read(&cmd->t_task_cdbs_left),
		atomic_read(&cmd->t_task_cdbs_sent),
		atomic_read(&cmd->t_task_cdbs_ex_left),
		(cmd->transport_state & CMD_T_ACTIVE) != 0,
		(cmd->transport_state & CMD_T_STOP) != 0,
		(cmd->transport_state & CMD_T_SENT) != 0);

	/*
	 * For SAM Task Attribute emulation for failed struct se_cmd
	 */
	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);

	switch (cmd->scsi_sense_reason) {
	case TCM_NON_EXISTENT_LUN:
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_INVALID_CDB_FIELD:
	case TCM_INVALID_PARAMETER_LIST:
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	case TCM_UNKNOWN_MODE_PAGE:
	case TCM_WRITE_PROTECTED:
	case TCM_CHECK_CONDITION_ABORT_CMD:
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
	case TCM_CHECK_CONDITION_NOT_READY:
		break;
	case TCM_RESERVATION_CONFLICT:
		/*
		 * No SENSE Data payload for this case, set SCSI Status
		 * and queue the response to $FABRIC_MOD.
		 *
		 * Uses linux/include/scsi/scsi.h SAM status codes defs
		 */
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
		/*
		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
		 * CONFLICT STATUS.
		 *
		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
		 */
		if (cmd->se_sess &&
		    cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
			core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
				cmd->orig_fe_lun, 0x2C,
				ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);

		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		goto check_stop;
	default:
		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
			cmd->t_task_cdb[0], cmd->scsi_sense_reason);
		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	}
	/*
	 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
	 * make the call to transport_send_check_condition_and_sense()
	 * directly.  Otherwise expect the fabric to make the call to
	 * transport_send_check_condition_and_sense() after handling
	 * possible unsolicited write data payloads.
	 */
	ret = transport_send_check_condition_and_sense(cmd,
			cmd->scsi_sense_reason, 0);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;

check_stop:
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}
EXPORT_SYMBOL(transport_generic_request_failure);

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}
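/*
 * Worked example: for a READ_10 CDB of 28 00 00 00 12 34 00 00 08 00,
 * transport_lba_32() above extracts LBA 0x1234 from cdb[2..5], while the
 * TRANSFER LENGTH bytes cdb[7..8] (decoded by transport_get_sectors_10()
 * further below) give 8 sectors.
 */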

static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
	se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
}

/*
 * Called from Fabric Module context from transport_execute_tasks()
 *
 * The return of this function determines if the tasks from struct se_cmd
 * get added to the execution queue in transport_execute_tasks(),
 * or are added to the delayed or ordered lists here.
 */
static inline int transport_execute_task_attr(struct se_cmd *cmd)
{
	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 1;
	/*
	 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
	 * to add the tasks of the passed struct se_cmd to the front of the list.
	 */
	if (cmd->sam_task_attr == MSG_HEAD_TAG) {
		pr_debug("Added HEAD_OF_QUEUE for CDB:"
			" 0x%02x, se_ordered_id: %u\n",
			cmd->t_task_cdb[0],
			cmd->se_ordered_id);
		return 1;
	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
		atomic_inc(&cmd->se_dev->dev_ordered_sync);
		smp_mb__after_atomic_inc();

		pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
				" list, se_ordered_id: %u\n",
				cmd->t_task_cdb[0],
				cmd->se_ordered_id);
		/*
		 * Add ORDERED command to tail of execution queue if
		 * no other older commands exist that need to be
		 * completed first.
		 */
		if (!atomic_read(&cmd->se_dev->simple_cmds))
			return 1;
	} else {
		/*
		 * For SIMPLE and UNTAGGED Task Attribute commands
		 */
		atomic_inc(&cmd->se_dev->simple_cmds);
		smp_mb__after_atomic_inc();
	}
	/*
	 * Otherwise, if one or more outstanding ORDERED task attributes exist,
	 * the dormant task(s) built for the passed struct se_cmd cannot be
	 * dispatched yet for this struct se_device.
	 */
	if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
		/*
		 * Otherwise, add cmd w/ tasks to delayed cmd queue that
		 * will be drained upon completion of HEAD_OF_QUEUE task.
		 */
		spin_lock(&cmd->se_dev->delayed_cmd_lock);
		cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
		list_add_tail(&cmd->se_delayed_node,
				&cmd->se_dev->delayed_cmd_list);
		spin_unlock(&cmd->se_dev->delayed_cmd_lock);

		pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
			" delayed CMD list, se_ordered_id: %u\n",
			cmd->t_task_cdb[0], cmd->sam_task_attr,
			cmd->se_ordered_id);
		/*
		 * Return zero to let transport_execute_tasks() know
		 * not to add the delayed tasks to the execution list.
		 */
		return 0;
	}
	/*
	 * Otherwise, no ORDERED task attributes exist..
	 */
	return 1;
}
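/*
 * Illustrative scenario (sketch): with one ORDERED command still
 * outstanding (dev_ordered_sync == 1), a subsequent SIMPLE command bumps
 * simple_cmds but is parked on dev->delayed_cmd_list and this function
 * returns 0; when the ORDERED command completes,
 * transport_complete_task_attr() below drains the delayed list.
 */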

/*
 * Called from fabric module context in transport_generic_new_cmd() and
 * transport_generic_process_write()
 */
static int transport_execute_tasks(struct se_cmd *cmd)
{
	int add_tasks;
	struct se_device *se_dev = cmd->se_dev;
	/*
	 * Call transport_cmd_check_stop() to see if a fabric exception
	 * has occurred that prevents execution.
	 */
	if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
		/*
		 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
		 * attribute for the tasks of the received struct se_cmd CDB
		 */
		add_tasks = transport_execute_task_attr(cmd);
		if (!add_tasks)
			goto execute_tasks;
		/*
		 * __transport_execute_tasks() -> __transport_add_tasks_from_cmd()
		 * adds associated se_tasks while holding dev->execute_task_lock
		 * before I/O dispatch to avoid a double spinlock access.
		 */
		__transport_execute_tasks(se_dev, cmd);
		return 0;
	}

execute_tasks:
	__transport_execute_tasks(se_dev, NULL);
	return 0;
}

/*
 * Called to check the struct se_device TCQ depth window, and once open,
 * pull struct se_task entries from struct se_device->execute_task_list
 * and dispatch them to the backend.
 *
 * Called from transport_processing_thread()
 */
static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
{
	int error;
	struct se_cmd *cmd = NULL;
	struct se_task *task = NULL;
	unsigned long flags;

check_depth:
	spin_lock_irq(&dev->execute_task_lock);
	if (new_cmd != NULL)
		__transport_add_tasks_from_cmd(new_cmd);

	if (list_empty(&dev->execute_task_list)) {
		spin_unlock_irq(&dev->execute_task_lock);
		return 0;
	}
	task = list_first_entry(&dev->execute_task_list,
				struct se_task, t_execute_list);
	__transport_remove_task_from_execute_queue(task, dev);
	spin_unlock_irq(&dev->execute_task_lock);

	cmd = task->task_se_cmd;
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	task->task_flags |= (TF_ACTIVE | TF_SENT);
	atomic_inc(&cmd->t_task_cdbs_sent);

	if (atomic_read(&cmd->t_task_cdbs_sent) ==
	    cmd->t_task_list_num)
		cmd->transport_state |= CMD_T_SENT;

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (cmd->execute_task)
		error = cmd->execute_task(task);
	else
		error = dev->transport->do_task(task);
	if (error != 0) {
		spin_lock_irqsave(&cmd->t_state_lock, flags);
		task->task_flags &= ~TF_ACTIVE;
		cmd->transport_state &= ~CMD_T_SENT;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		transport_stop_tasks_for_cmd(cmd);
		transport_generic_request_failure(cmd);
	}

	new_cmd = NULL;
	goto check_depth;
}

static inline u32 transport_get_sectors_6(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 8-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * Use 24-bit allocation length for TYPE_TAPE.
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];

	/*
	 * Everything else assume TYPE_DISK Sector CDB location.
	 * Use 8-bit sector value.  SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
type_disk:
	return cdb[4] ? : 256;
}
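/*
 * Example: per the SBC-3 rule quoted above, a READ_6/WRITE_6 CDB with
 * cdb[4] == 0 means 256 logical blocks, while cdb[4] == 0x10 means 16.
 */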

static inline u32 transport_get_sectors_10(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 16-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * XXX_10 is not defined in SSC, throw an exception
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
		*ret = -EINVAL;
		return 0;
	}

	/*
	 * Everything else assume TYPE_DISK Sector CDB location.
	 * Use 16-bit sector value.
	 */
type_disk:
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * XXX_12 is not defined in SSC, throw an exception
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
		*ret = -EINVAL;
		return 0;
	}

	/*
	 * Everything else assume TYPE_DISK Sector CDB location.
	 * Use 32-bit sector value.
	 */
type_disk:
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * Use 24-bit allocation length for TYPE_TAPE.
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];

type_disk:
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
		    (cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
		    (cdb[30] << 8) + cdb[31];

}

static inline u32 transport_get_size(
	u32 sectors,
	unsigned char *cdb,
	struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
		if (cdb[1] & 1) { /* sectors */
			return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
		} else /* bytes */
			return sectors;
	}
#if 0
	pr_debug("Returning block_size: %u, sectors: %u == %u for"
			" %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors,
			dev->se_sub_dev->se_dev_attrib.block_size * sectors,
			dev->transport->name);
#endif
	return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
}
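/*
 * Example: for a TYPE_DISK device with a 512-byte block_size, sectors == 8
 * makes transport_get_size() return 4096 bytes.
 */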

static void transport_xor_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	int i;
	int count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg
	 */

	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg), KM_USER0);
		if (!addr)
			goto out;

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr, KM_USER0);
	}

out:
	kfree(buf);
}
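/*
 * Worked example of step 3 above: if a data-out byte is 0xF0 and the
 * corresponding block read in step 1 holds 0x0F, the data-in buffer
 * receives 0xF0 ^ 0x0F == 0xFF.
 */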

/*
 * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
 */
static int transport_get_sense_data(struct se_cmd *cmd)
{
	unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
	struct se_device *dev = cmd->se_dev;
	struct se_task *task = NULL, *task_tmp;
	unsigned long flags;
	u32 offset = 0;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}

	list_for_each_entry_safe(task, task_tmp,
				&cmd->t_task_list, t_list) {
		if (!(task->task_flags & TF_HAS_SENSE))
			continue;

		if (!dev->transport->get_sense_buffer) {
			pr_err("dev->transport->get_sense_buffer"
					" is NULL\n");
			continue;
		}

		sense_buffer = dev->transport->get_sense_buffer(task);
		if (!sense_buffer) {
			pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate"
				" sense buffer for task with sense\n",
				cmd->se_tfo->get_task_tag(cmd), task);
			continue;
		}
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		offset = cmd->se_tfo->set_fabric_sense_len(cmd,
				TRANSPORT_SENSE_BUFFER);

		memcpy(&buffer[offset], sense_buffer,
				TRANSPORT_SENSE_BUFFER);
		cmd->scsi_status = task->task_scsi_status;
		/* Automatically padded */
		cmd->scsi_sense_length =
				(TRANSPORT_SENSE_BUFFER + offset);

		pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
				" and sense\n",
			dev->se_hba->hba_id, dev->transport->name,
				cmd->scsi_status);
		return 0;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return -1;
}

static inline long long transport_dev_end_lba(struct se_device *dev)
{
	return dev->transport->get_blocks(dev) + 1;
}

static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	u32 sectors;

	if (dev->transport->get_device_type(dev) != TYPE_DISK)
		return 0;

	sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);

	if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
		pr_err("LBA: %llu Sectors: %u exceeds"
			" transport_dev_end_lba(): %llu\n",
			cmd->t_task_lba, sectors,
			transport_dev_end_lba(dev));
		return -EINVAL;
	}

	return 0;
}

static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
{
	/*
	 * Determine if the received WRITE_SAME is used for direct
	 * passthrough into Linux/SCSI with struct request via TCM/pSCSI
	 * or if we are signaling the use of internal WRITE_SAME + UNMAP=1
	 * emulation for Linux/BLOCK discard with TCM/IBLOCK code.
	 */
	int passthrough = (dev->transport->transport_type ==
				TRANSPORT_PLUGIN_PHBA_PDEV);

	if (!passthrough) {
		if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
			pr_err("WRITE_SAME PBDATA and LBDATA"
				" bits not supported for Block Discard"
				" Emulation\n");
			return -ENOSYS;
		}
		/*
		 * Currently for the emulated case we only accept
		 * tpws with the UNMAP=1 bit set.
		 */
		if (!(flags[0] & 0x08)) {
			pr_err("WRITE_SAME w/o UNMAP bit not"
				" supported for Block Discard Emulation\n");
			return -ENOSYS;
		}
	}

	return 0;
}

/*	transport_generic_cmd_sequencer():
 *
 *	Generic Command Sequencer that should work for most DAS transport
 *	drivers.
 *
 *	Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
 *	RX Thread.
 *
 *	FIXME: Need to support other SCSI OPCODEs here as well.
 */
static int transport_generic_cmd_sequencer(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	struct se_device *dev = cmd->se_dev;
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	int ret = 0, sector_ret = 0, passthrough;
	u32 sectors = 0, size = 0, pr_reg_type = 0;
	u16 service_action;
	u8 alua_ascq = 0;
	/*
	 * Check for an existing UNIT ATTENTION condition
	 */
	if (core_scsi3_ua_check(cmd, cdb) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
		return -EINVAL;
	}
	/*
	 * Check status of Asymmetric Logical Unit Assignment port
	 */
	ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
	if (ret != 0) {
		/*
		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
		 * The ALUA additional sense code qualifier (ASCQ) is determined
		 * by the ALUA primary or secondary access state.
		 */
		if (ret > 0) {
#if 0
			pr_debug("[%s]: ALUA TG Port not available,"
				" SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
				cmd->se_tfo->get_fabric_name(), alua_ascq);
#endif
			transport_set_sense_codes(cmd, 0x04, alua_ascq);
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
			return -EINVAL;
		}
		goto out_invalid_cdb_field;
	}
	/*
	 * Check status for SPC-3 Persistent Reservations
	 */
	if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
		if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
					cmd, cdb, pr_reg_type) != 0) {
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
			cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
			return -EBUSY;
		}
		/*
		 * This means the CDB is allowed for the SCSI Initiator port
		 * when said port is *NOT* holding the legacy SPC-2 or
		 * SPC-3 Persistent Reservation.
		 */
	}

	/*
	 * If we operate in passthrough mode we skip most CDB emulation and
	 * instead hand the commands down to the physical SCSI device.
	 */
	passthrough =
		(dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_64(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_10:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_64(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case XDWRITEREAD_10:
		if ((cmd->data_direction != DMA_TO_DEVICE) ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			goto out_invalid_cdb_field;
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;

		/*
		 * Do not allow BIDI commands for passthrough mode.
		 */
		if (passthrough)
			goto out_unsupported_cdb;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->transport_complete_callback = &transport_xor_callback;
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		break;
	case VARIABLE_LENGTH_CMD:
		service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
			if (sector_ret)
				goto out_unsupported_cdb;
			size = transport_get_size(sectors, cdb, cmd);
			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;

			/*
			 * Do not allow BIDI commands for passthrough mode.
			 */
			if (passthrough)
				goto out_unsupported_cdb;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->transport_complete_callback = &transport_xor_callback;
			if (cdb[1] & 0x8)
				cmd->se_cmd_flags |= SCF_FUA;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
			if (sector_ret)
				goto out_unsupported_cdb;

			if (sectors)
				size = transport_get_size(1, cdb, cmd);
			else {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				goto out_invalid_cdb_field;
			}

			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
			cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;

			if (target_check_write_same_discard(&cdb[10], dev) < 0)
				goto out_unsupported_cdb;
			if (!passthrough)
				cmd->execute_task = target_emulate_write_same;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
				" 0x%04x not supported\n", service_action);
			goto out_unsupported_cdb;
		}
		break;
	case MAINTENANCE_IN:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/* MAINTENANCE_IN from SCC-2 */
			/*
			 * Check for emulated MI_REPORT_TARGET_PGS.
			 */
			if (cdb[1] == MI_REPORT_TARGET_PGS &&
			    su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
				cmd->execute_task =
					target_emulate_report_target_port_groups;
			}
			size = (cdb[6] << 24) | (cdb[7] << 16) |
			       (cdb[8] << 8) | cdb[9];
		} else {
			/* GPCMD_SEND_KEY from multi media commands */
			size = (cdb[8] << 8) + cdb[9];
		}
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MODE_SELECT:
		size = cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MODE_SELECT_10:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MODE_SENSE:
		size = cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_modesense;
		break;
	case MODE_SENSE_10:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_modesense;
		break;
	case GPCMD_READ_BUFFER_CAPACITY:
	case GPCMD_SEND_OPC:
	case LOG_SELECT:
	case LOG_SENSE:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case READ_BLOCK_LIMITS:
		size = READ_BLOCK_LEN;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case GPCMD_GET_CONFIGURATION:
	case GPCMD_READ_FORMAT_CAPACITIES:
	case GPCMD_READ_DISC_INFO:
	case GPCMD_READ_TRACK_RZONE_INFO:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case PERSISTENT_RESERVE_IN:
		if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
			cmd->execute_task = target_scsi3_emulate_pr_in;
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case PERSISTENT_RESERVE_OUT:
		if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
			cmd->execute_task = target_scsi3_emulate_pr_out;
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case GPCMD_MECHANISM_STATUS:
	case GPCMD_READ_DVD_STRUCTURE:
		size = (cdb[8] << 8) + cdb[9];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case READ_POSITION:
		size = READ_POSITION_LEN;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MAINTENANCE_OUT:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/* MAINTENANCE_OUT from SCC-2
			 *
			 * Check for emulated MO_SET_TARGET_PGS.
			 */
			if (cdb[1] == MO_SET_TARGET_PGS &&
			    su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
				cmd->execute_task =
					target_emulate_set_target_port_groups;
			}

			size = (cdb[6] << 24) | (cdb[7] << 16) |
			       (cdb[8] << 8) | cdb[9];
		} else  {
			/* GPCMD_REPORT_KEY from multi media commands */
			size = (cdb[8] << 8) + cdb[9];
		}
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case INQUIRY:
		size = (cdb[3] << 8) + cdb[4];
		/*
		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
		 * See spc4r17 section 5.3
		 */
		if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
			cmd->sam_task_attr = MSG_HEAD_TAG;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_inquiry;
		break;
	case READ_BUFFER:
		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_readcapacity;
		break;
	case READ_MEDIA_SERIAL_NUMBER:
	case SECURITY_PROTOCOL_IN:
	case SECURITY_PROTOCOL_OUT:
		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case SERVICE_ACTION_IN:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			if (!passthrough)
				cmd->execute_task =
					target_emulate_readcapacity_16;
			break;
		default:
			if (passthrough)
				break;

			pr_err("Unsupported SA: 0x%02x\n",
				cmd->t_task_cdb[1] & 0x1f);
			goto out_unsupported_cdb;
		}
		/*FALLTHROUGH*/
	case ACCESS_CONTROL_IN:
	case ACCESS_CONTROL_OUT:
	case EXTENDED_COPY:
	case READ_ATTRIBUTE:
	case RECEIVE_COPY_RESULTS:
	case WRITE_ATTRIBUTE:
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case RECEIVE_DIAGNOSTIC:
	case SEND_DIAGNOSTIC:
		size = (cdb[3] << 8) | cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
#if 0
	case GPCMD_READ_CD:
		sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		size = (2336 * sectors);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
#endif
	case READ_TOC:
		size = cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case REQUEST_SENSE:
		size = cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_request_sense;
		break;
	case READ_ELEMENT_STATUS:
		size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case WRITE_BUFFER:
		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case RESERVE:
	case RESERVE_10:
		/*
		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
		 */
		if (cdb[0] == RESERVE_10)
			size = (cdb[7] << 8) | cdb[8];
		else
			size = cmd->data_length;

		/*
		 * Setup the legacy emulated handler for SPC-2 and
		 * >= SPC-3 compatible reservation handling (CRH=1).
		 * Otherwise, we assume the underlying SCSI logic is
		 * running in SPC_PASSTHROUGH, and wants reservations
		 * emulation disabled.
		 */
		if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
			cmd->execute_task = target_scsi2_reservation_reserve;
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case RELEASE:
	case RELEASE_10:
		/*
		 * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
		 */
		if (cdb[0] == RELEASE_10)
			size = (cdb[7] << 8) | cdb[8];
		else
			size = cmd->data_length;

		if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
			cmd->execute_task = target_scsi2_reservation_release;
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		/*
		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
		 */
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
			cmd->t_task_lba = transport_lba_64(cdb);
		}
		if (sector_ret)
			goto out_unsupported_cdb;

		size = transport_get_size(sectors, cdb, cmd);
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;

		if (passthrough)
			break;

		/*
		 * Check to ensure that LBA + Range does not extend past the end
		 * of the device for IBLOCK and FILEIO ->do_sync_cache() backend calls
		 */
		if ((cmd->t_task_lba != 0) || (sectors != 0)) {
			if (transport_cmd_get_valid_sectors(cmd) < 0)
				goto out_invalid_cdb_field;
		}
		cmd->execute_task = target_emulate_synchronize_cache;
		break;
	case UNMAP:
		size = get_unaligned_be16(&cdb[7]);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;

		if (sectors)
			size = transport_get_size(1, cdb, cmd);
		else {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			goto out_invalid_cdb_field;
		}

		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;

		if (target_check_write_same_discard(&cdb[1], dev) < 0)
			goto out_unsupported_cdb;
		if (!passthrough)
			cmd->execute_task = target_emulate_write_same;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;

		if (sectors)
			size = transport_get_size(1, cdb, cmd);
		else {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			goto out_invalid_cdb_field;
		}

		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of original reserved field
		 */
		if (target_check_write_same_discard(&cdb[1], dev) < 0)
			goto out_unsupported_cdb;
		if (!passthrough)
			cmd->execute_task = target_emulate_write_same;
		break;
	case ALLOW_MEDIUM_REMOVAL:
	case ERASE:
	case REZERO_UNIT:
	case SEEK_10:
	case SPACE:
	case START_STOP:
	case TEST_UNIT_READY:
	case VERIFY:
	case WRITE_FILEMARKS:
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_noop;
		break;
	case GPCMD_CLOSE_TRACK:
	case INITIALIZE_ELEMENT_STATUS:
	case GPCMD_LOAD_UNLOAD:
	case GPCMD_SET_SPEED:
	case MOVE_MEDIUM:
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case REPORT_LUNS:
		cmd->execute_task = target_report_luns;
		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		/*
		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
		 * See spc4r17 section 5.3
		 */
		if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
			cmd->sam_task_attr = MSG_HEAD_TAG;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	default:
		pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
			" 0x%02x, sending CHECK_CONDITION.\n",
			cmd->se_tfo->get_fabric_name(), cdb[0]);
		goto out_unsupported_cdb;
	}

	if (size != cmd->data_length) {
		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
				cmd->data_length, size, cdb[0]);

		cmd->cmd_spdtl = size;

		if (cmd->data_direction == DMA_TO_DEVICE) {
			pr_err("Rejecting underflow/overflow"
					" WRITE data\n");
			goto out_invalid_cdb_field;
		}
		/*
		 * Reject READ_* or WRITE_* with overflow/underflow for
		 * type SCF_SCSI_DATA_SG_IO_CDB.
		 */
		if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512))  {
			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
				" CDB on non 512-byte sector setup subsystem"
				" plugin: %s\n", dev->transport->name);
			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
			goto out_invalid_cdb_field;
		}

		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
		}
		cmd->data_length = size;
	}

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB &&
	    sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) {
		printk_ratelimited(KERN_ERR "SCSI OP %02xh with too big sectors %u\n",
				   cdb[0], sectors);
		goto out_invalid_cdb_field;
	}

	/* reject any command that we don't have a handler for */
	if (!(passthrough || cmd->execute_task ||
	     (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
		goto out_unsupported_cdb;

	transport_set_supported_SAM_opcode(cmd);
	return ret;

out_unsupported_cdb:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
	return -EINVAL;
out_invalid_cdb_field:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
	return -EINVAL;
}

/*
 * Called from I/O completion to determine which dormant/delayed
 * and ordered cmds need to have their tasks added to the execution queue.
 */
static void transport_complete_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_cmd *cmd_p, *cmd_tmp;
	int new_active_tasks = 0;

	if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
		atomic_dec(&dev->simple_cmds);
		smp_mb__after_atomic_dec();
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
			cmd->se_ordered_id);
	} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for"
			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
			cmd->se_ordered_id);
	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
		atomic_dec(&dev->dev_ordered_sync);
		smp_mb__after_atomic_dec();

		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
			" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
	}
	/*
	 * Process all commands up to the last received
	 * ORDERED task attribute which requires another blocking
	 * boundary
	 */
	spin_lock(&dev->delayed_cmd_lock);
	list_for_each_entry_safe(cmd_p, cmd_tmp,
			&dev->delayed_cmd_list, se_delayed_node) {

		list_del(&cmd_p->se_delayed_node);
		spin_unlock(&dev->delayed_cmd_lock);

		pr_debug("Calling add_tasks() for"
			" cmd_p: 0x%02x Task Attr: 0x%02x"
			" Dormant -> Active, se_ordered_id: %u\n",
			cmd_p->t_task_cdb[0],
			cmd_p->sam_task_attr, cmd_p->se_ordered_id);

		transport_add_tasks_from_cmd(cmd_p);
		new_active_tasks++;

		spin_lock(&dev->delayed_cmd_lock);
		if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
			break;
	}
	spin_unlock(&dev->delayed_cmd_lock);
	/*
	 * If new tasks have become active, wake up the transport thread
	 * to do the processing of the Active tasks.
	 */
	if (new_active_tasks != 0)
		wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
}

static void transport_complete_qf(struct se_cmd *cmd)
{
	int ret = 0;

	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);

	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret)
			goto out;
	}

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		ret = cmd->se_tfo->queue_data_in(cmd);
		break;
	case DMA_TO_DEVICE:
		if (cmd->t_bidi_data_sg) {
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret < 0)
				break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		ret = cmd->se_tfo->queue_status(cmd);
		break;
	default:
		break;
	}

out:
	if (ret < 0) {
		transport_handle_queue_full(cmd, cmd->se_dev);
		return;
	}
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
}

static void transport_handle_queue_full(
	struct se_cmd *cmd,
	struct se_device *dev)
{
	spin_lock_irq(&dev->qf_cmd_lock);
	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
	atomic_inc(&dev->dev_qf_count);
	smp_mb__after_atomic_inc();
	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);

	schedule_work(&cmd->se_dev->qf_work_queue);
}

static void target_complete_ok_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	int reason = 0, ret;

	/*
	 * Check if we need to move delayed/dormant tasks from cmds on the
	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
	 * Attribute.
	 */
	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);
	/*
	 * Check to schedule QUEUE_FULL work, or execute an existing
	 * cmd->transport_qf_callback()
	 */
	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
		schedule_work(&cmd->se_dev->qf_work_queue);

	/*
	 * Check if we need to retrieve a sense buffer from
	 * the struct se_cmd in question.
	 */
	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		if (transport_get_sense_data(cmd) < 0)
			reason = TCM_NON_EXISTENT_LUN;

		/*
		 * Only set when a struct se_task->task_scsi_status returned
		 * a non GOOD status.
		 */
		if (cmd->scsi_status) {
			ret = transport_send_check_condition_and_sense(
					cmd, reason, 1);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop_to_fabric(cmd);
			return;
		}
	}
	/*
	 * Check for a callback, used amongst other things by
	 * XDWRITE_READ_10 emulation.
	 */
	if (cmd->transport_complete_callback)
		cmd->transport_complete_callback(cmd);

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		spin_lock(&cmd->se_lun->lun_sep_lock);
		if (cmd->se_lun->lun_sep) {
			cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
					cmd->data_length;
		}
		spin_unlock(&cmd->se_lun->lun_sep_lock);

		ret = cmd->se_tfo->queue_data_in(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	case DMA_TO_DEVICE:
		spin_lock(&cmd->se_lun->lun_sep_lock);
		if (cmd->se_lun->lun_sep) {
			cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
				cmd->data_length;
		}
		spin_unlock(&cmd->se_lun->lun_sep_lock);
		/*
		 * Check if we need to send READ payload for BIDI-COMMAND
		 */
		if (cmd->t_bidi_data_sg) {
			spin_lock(&cmd->se_lun->lun_sep_lock);
			if (cmd->se_lun->lun_sep) {
				cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
					cmd->data_length;
			}
			spin_unlock(&cmd->se_lun->lun_sep_lock);
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;
			break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	default:
		break;
	}

	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
		" data_direction: %d\n", cmd, cmd->data_direction);
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}

static void transport_free_dev_tasks(struct se_cmd *cmd)
{
	struct se_task *task, *task_tmp;
	unsigned long flags;
	LIST_HEAD(dispose_list);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_for_each_entry_safe(task, task_tmp,
				&cmd->t_task_list, t_list) {
		if (!(task->task_flags & TF_ACTIVE))
			list_move_tail(&task->t_list, &dispose_list);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	while (!list_empty(&dispose_list)) {
		task = list_first_entry(&dispose_list, struct se_task, t_list);

		if (task->task_sg != cmd->t_data_sg &&
		    task->task_sg != cmd->t_bidi_data_sg)
			kfree(task->task_sg);

		list_del(&task->t_list);

		cmd->se_dev->transport->free_task(task);
	}
}

static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, nents, count)
		__free_page(sg_page(sg));

	kfree(sgl);
}

static inline void transport_free_pages(struct se_cmd *cmd)
{
	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
		return;

	transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
	cmd->t_data_sg = NULL;
	cmd->t_data_nents = 0;

	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
	cmd->t_bidi_data_sg = NULL;
	cmd->t_bidi_data_nents = 0;
}

/**
 * transport_release_cmd - free a command
 * @cmd:       command to free
 *
 * This routine unconditionally frees a command, and reference counting
 * or list removal must be done in the caller.
 */
static void transport_release_cmd(struct se_cmd *cmd)
{
	BUG_ON(!cmd->se_tfo);

	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
		core_tmr_release_req(cmd->se_tmr_req);
	if (cmd->t_task_cdb != cmd->__t_task_cdb)
		kfree(cmd->t_task_cdb);
	/*
	 * If this cmd has been setup with target_get_sess_cmd(), drop
	 * the kref and call ->release_cmd() in kref callback.
	 */
	if (cmd->check_release != 0) {
		target_put_sess_cmd(cmd->se_sess, cmd);
		return;
	}
	cmd->se_tfo->release_cmd(cmd);
}

/**
 * transport_put_cmd - release a reference to a command
 * @cmd:       command to release
 *
 * This routine releases our reference to the command and frees it if possible.
 */
static void transport_put_cmd(struct se_cmd *cmd)
{
	unsigned long flags;
	int free_tasks = 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (atomic_read(&cmd->t_fe_count)) {
		if (!atomic_dec_and_test(&cmd->t_fe_count))
			goto out_busy;
	}

	if (atomic_read(&cmd->t_se_count)) {
		if (!atomic_dec_and_test(&cmd->t_se_count))
			goto out_busy;
	}

	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		transport_all_task_dev_remove_state(cmd);
		free_tasks = 1;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (free_tasks != 0)
		transport_free_dev_tasks(cmd);

	transport_free_pages(cmd);
	transport_release_cmd(cmd);
	return;
out_busy:
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

/*
 * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of
 * allocating in the core.
 * @cmd:  Associated se_cmd descriptor
 * @sgl:  SGL style memory for TCM WRITE / READ
 * @sgl_count: Number of SGL elements
 * @sgl_bidi: SGL style memory for TCM BIDI READ
 * @sgl_bidi_count: Number of BIDI READ SGL elements
 *
 * Return: nonzero if cmd was rejected for -ENOMEM or improper usage
 * of parameters.
 */
int transport_generic_map_mem_to_cmd(
	struct se_cmd *cmd,
3491 3492 3493 3494
	struct scatterlist *sgl,
	u32 sgl_count,
	struct scatterlist *sgl_bidi,
	u32 sgl_bidi_count)
3495
{
3496
	if (!sgl || !sgl_count)
3497 3498 3499 3500
		return 0;

	if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
	    (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512
		/*
		 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
		 * scatterlists already have been set to follow what the fabric
		 * passes for the original expected data transfer length.
		 */
		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
			pr_warn("Rejecting SCSI DATA overflow for fabric using"
				" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
			return -EINVAL;
		}
3513

3514 3515
		cmd->t_data_sg = sgl;
		cmd->t_data_nents = sgl_count;
3516

3517 3518 3519
		if (sgl_bidi && sgl_bidi_count) {
			cmd->t_bidi_data_sg = sgl_bidi;
			cmd->t_bidi_data_nents = sgl_bidi_count;
3520 3521 3522 3523 3524 3525 3526 3527
		}
		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	}

	return 0;
}
EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
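
/*
 * Illustrative caller sketch (not from this file): a fabric module that
 * already owns a scatterlist for the payload would hand it off like this,
 * where fabric_sgl/fabric_nents are hypothetical fabric-side names:
 *
 *	rc = transport_generic_map_mem_to_cmd(se_cmd, fabric_sgl,
 *					      fabric_nents, NULL, 0);
 *	if (rc < 0)
 *		transport_send_check_condition_and_sense(se_cmd,
 *				se_cmd->scsi_sense_reason, 0);
 *
 * On success the core skips transport_generic_get_mem() later, because
 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is now set.
 */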

void *transport_kmap_data_sg(struct se_cmd *cmd)
{
	struct scatterlist *sg = cmd->t_data_sg;
	struct page **pages;
	int i;

	BUG_ON(!sg);
	/*
	 * We need to take into account a possible offset here for fabrics like
	 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
	 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd().
	 */
	if (!cmd->t_data_nents)
		return NULL;
	else if (cmd->t_data_nents == 1)
		return kmap(sg_page(sg)) + sg->offset;

	/* >1 page. use vmap */
	pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
	if (!pages)
		return NULL;

	/* convert sg[] to pages[] */
	for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
		pages[i] = sg_page(sg);
	}

	cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
	kfree(pages);
	if (!cmd->t_data_vmap)
		return NULL;

	return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
}
EXPORT_SYMBOL(transport_kmap_data_sg);

void transport_kunmap_data_sg(struct se_cmd *cmd)
{
	if (!cmd->t_data_nents) {
		return;
	} else if (cmd->t_data_nents == 1) {
		kunmap(sg_page(cmd->t_data_sg));
		return;
	}

	vunmap(cmd->t_data_vmap);
	cmd->t_data_vmap = NULL;
}
EXPORT_SYMBOL(transport_kunmap_data_sg);
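
/*
 * Sketch of the expected kmap/kunmap pairing (illustrative only): the two
 * calls must bracket any linear access to the payload, and the mapping is
 * only valid in process context.
 *
 *	unsigned char *buf = transport_kmap_data_sg(cmd);
 *
 *	if (buf) {
 *		memcpy(buf, response, min(len, cmd->data_length));
 *		transport_kunmap_data_sg(cmd);
 *	}
 *
 * response/len are hypothetical caller variables; a single-entry SGL takes
 * the kmap() path above, anything larger goes through vmap().
 */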

static int
transport_generic_get_mem(struct se_cmd *cmd)
{
	u32 length = cmd->data_length;
	unsigned int nents;
	struct page *page;
	gfp_t zero_flag;
	int i = 0;

	nents = DIV_ROUND_UP(length, PAGE_SIZE);
	cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
	if (!cmd->t_data_sg)
		return -ENOMEM;

	cmd->t_data_nents = nents;
	sg_init_table(cmd->t_data_sg, nents);

	zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB ? 0 : __GFP_ZERO;

	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);
		page = alloc_page(GFP_KERNEL | zero_flag);
		if (!page)
			goto out;

		sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}
	return 0;

out:
	/* Only entries [0, i) have a page attached; entry i failed. */
	while (i > 0) {
		i--;
		__free_page(sg_page(&cmd->t_data_sg[i]));
	}
	kfree(cmd->t_data_sg);
	cmd->t_data_sg = NULL;
	return -ENOMEM;
}
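
/*
 * Worked example for the allocator above (assuming 4 KiB pages): a
 * cmd->data_length of 9216 bytes gives nents = DIV_ROUND_UP(9216, 4096)
 * = 3 SGL entries; the first two map PAGE_SIZE each and the last maps
 * the remaining 1024 bytes.
 */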

/* Reduce sectors if they are too long for the device */
static inline sector_t transport_limit_task_sectors(
	struct se_device *dev,
	unsigned long long lba,
	sector_t sectors)
{
	sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);

	if (dev->transport->get_device_type(dev) == TYPE_DISK)
		if ((lba + sectors) > transport_dev_end_lba(dev))
			sectors = ((transport_dev_end_lba(dev) - lba) + 1);

	return sectors;
}


/*
 * This function can be used by HW target mode drivers to create a linked
 * scatterlist from all contiguously allocated struct se_task->task_sg[].
 * This is intended to be called during the completion path by TCM Core
 * when struct target_core_fabric_ops->check_task_sg_chaining is enabled.
 */
void transport_do_task_sg_chain(struct se_cmd *cmd)
{
	struct scatterlist *sg_first = NULL;
	struct scatterlist *sg_prev = NULL;
	int sg_prev_nents = 0;
	struct scatterlist *sg;
	struct se_task *task;
	u32 chained_nents = 0;
	int i;

	BUG_ON(!cmd->se_tfo->task_sg_chaining);

	/*
	 * Walk the struct se_task list and setup scatterlist chains
	 * for each contiguously allocated struct se_task->task_sg[].
	 */
	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		if (!task->task_sg)
			continue;

		if (!sg_first) {
			sg_first = task->task_sg;
			chained_nents = task->task_sg_nents;
		} else {
			sg_chain(sg_prev, sg_prev_nents, task->task_sg);
			chained_nents += task->task_sg_nents;
		}
		/*
		 * For the padded tasks, use the extra SGL vector allocated
		 * in transport_allocate_data_tasks() for the sg_prev_nents
		 * offset into sg_chain() above.
		 *
		 * We do not need the padding for the last task (or a single
		 * task), but in that case we will never use the sg_prev_nents
		 * value below which would be incorrect.
		 */
		sg_prev_nents = (task->task_sg_nents + 1);
		sg_prev = task->task_sg;
	}
	/*
	 * Setup the starting pointer and total t_tasks_sg_linked_no including
	 * padding SGs for linking and to mark the end.
	 */
	cmd->t_tasks_sg_chained = sg_first;
	cmd->t_tasks_sg_chained_no = chained_nents;

	pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
		" t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
		cmd->t_tasks_sg_chained_no);

	for_each_sg(cmd->t_tasks_sg_chained, sg,
			cmd->t_tasks_sg_chained_no, i) {

		pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
			i, sg, sg_page(sg), sg->length, sg->offset);
		if (sg_is_chain(sg))
			pr_debug("SG: %p sg_is_chain=1\n", sg);
		if (sg_is_last(sg))
			pr_debug("SG: %p sg_is_last=1\n", sg);
	}
}
EXPORT_SYMBOL(transport_do_task_sg_chain);
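
/*
 * A HW fabric consuming the chained list built above would then walk it
 * as a single SGL (sketch; program_dma_segment() is a stand-in for the
 * driver's real DMA setup, not an API from this file):
 *
 *	for_each_sg(cmd->t_tasks_sg_chained, sg,
 *		    cmd->t_tasks_sg_chained_no, i)
 *		program_dma_segment(sg_page(sg), sg->length, sg->offset);
 *
 * for_each_sg() transparently follows the chain entries inserted by
 * sg_chain() above.
 */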

/*
 * Break up cmd into chunks transport can handle
 */
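/*
 * Worked example (assuming block_size == 512 and max_sectors == 1024):
 * a 1 MiB WRITE is 2048 sectors, so task_count = DIV_ROUND_UP(2048, 1024)
 * = 2, yielding two struct se_task's of 512 KiB each that consume the
 * command SGL in order.
 */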
static int
transport_allocate_data_tasks(struct se_cmd *cmd,
	enum dma_data_direction data_direction,
	struct scatterlist *cmd_sg, unsigned int sgl_nents)
{
	struct se_device *dev = cmd->se_dev;
	int task_count, i;
	unsigned long long lba;
	sector_t sectors, dev_max_sectors;
	u32 sector_size;

	if (transport_cmd_get_valid_sectors(cmd) < 0)
		return -EINVAL;

	dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
	sector_size = dev->se_sub_dev->se_dev_attrib.block_size;

	WARN_ON(cmd->data_length % sector_size);

	lba = cmd->t_task_lba;
	sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
	task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);

	/*
	 * If we need just a single task reuse the SG list in the command
	 * and avoid a lot of work.
	 */
	if (task_count == 1) {
		struct se_task *task;
		unsigned long flags;

		task = transport_generic_get_task(cmd, data_direction);
		if (!task)
			return -ENOMEM;

		task->task_sg = cmd_sg;
		task->task_sg_nents = sgl_nents;

		task->task_lba = lba;
		task->task_sectors = sectors;
		task->task_size = task->task_sectors * sector_size;

		spin_lock_irqsave(&cmd->t_state_lock, flags);
		list_add_tail(&task->t_list, &cmd->t_task_list);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		return task_count;
	}

	for (i = 0; i < task_count; i++) {
		struct se_task *task;
		unsigned int task_size, task_sg_nents_padded;
		struct scatterlist *sg;
		unsigned long flags;
		int count;

		task = transport_generic_get_task(cmd, data_direction);
		if (!task)
			return -ENOMEM;

		task->task_lba = lba;
		task->task_sectors = min(sectors, dev_max_sectors);
		task->task_size = task->task_sectors * sector_size;

		/*
		 * This now assumes that passed sg_ents are in PAGE_SIZE chunks
		 * in order to calculate the number per task SGL entries
		 */
		task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);
		/*
		 * Check if the fabric module driver is requesting that all
		 * struct se_task->task_sg[] be chained together.  If so,
		 * then allocate an extra padding SG entry for linking and
		 * marking the end of the chained SGL for every task except
		 * the last one for (task_count > 1) operation, or skipping
		 * the extra padding for the (task_count == 1) case.
		 */
		if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1)))
			task_sg_nents_padded = (task->task_sg_nents + 1);
		else
			task_sg_nents_padded = task->task_sg_nents;

		task->task_sg = kmalloc(sizeof(struct scatterlist) *
					task_sg_nents_padded, GFP_KERNEL);
		if (!task->task_sg) {
			cmd->se_dev->transport->free_task(task);
			return -ENOMEM;
		}

		sg_init_table(task->task_sg, task_sg_nents_padded);

		task_size = task->task_size;

		/* Build new sgl, only up to task_size */
		for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
			if (cmd_sg->length > task_size)
				break;

			*sg = *cmd_sg;
			task_size -= cmd_sg->length;
			cmd_sg = sg_next(cmd_sg);
		}

		lba += task->task_sectors;
		sectors -= task->task_sectors;

		spin_lock_irqsave(&cmd->t_state_lock, flags);
		list_add_tail(&task->t_list, &cmd->t_task_list);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	}

	return task_count;
}

static int
transport_allocate_control_task(struct se_cmd *cmd)
{
	struct se_task *task;
	unsigned long flags;

	/* Workaround for handling zero-length control CDBs */
	if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
	    !cmd->data_length)
		return 0;

	task = transport_generic_get_task(cmd, cmd->data_direction);
	if (!task)
		return -ENOMEM;

	task->task_sg = cmd->t_data_sg;
	task->task_size = cmd->data_length;
	task->task_sg_nents = cmd->t_data_nents;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_add_tail(&task->t_list, &cmd->t_task_list);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/* Success! Return number of tasks allocated */
	return 1;
}

/*
 * Allocate any required resources to execute the command, and either place
 * it on the execution queue if possible.  For writes we might not have the
 * payload yet, thus notify the fabric via a call to ->write_pending instead.
 */
int transport_generic_new_cmd(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	int task_cdbs, task_cdbs_bidi = 0;
	int set_counts = 1;
	int ret = 0;

	/*
	 * Determine if the TCM fabric module has already allocated physical
	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
	 * beforehand.
	 */
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
	    cmd->data_length) {
		ret = transport_generic_get_mem(cmd);
		if (ret < 0)
			goto out_fail;
	}

	/*
	 * For BIDI command set up the read tasks first.
	 */
	if (cmd->t_bidi_data_sg &&
	    dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB));

		task_cdbs_bidi = transport_allocate_data_tasks(cmd,
				DMA_FROM_DEVICE, cmd->t_bidi_data_sg,
				cmd->t_bidi_data_nents);
		if (task_cdbs_bidi <= 0)
			goto out_fail;

		atomic_inc(&cmd->t_fe_count);
		atomic_inc(&cmd->t_se_count);
		set_counts = 0;
	}

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
		task_cdbs = transport_allocate_data_tasks(cmd,
					cmd->data_direction, cmd->t_data_sg,
					cmd->t_data_nents);
	} else {
		task_cdbs = transport_allocate_control_task(cmd);
	}

	if (task_cdbs < 0)
		goto out_fail;
	else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
		spin_lock_irq(&cmd->t_state_lock);
		cmd->t_state = TRANSPORT_COMPLETE;
		cmd->transport_state |= CMD_T_ACTIVE;
		spin_unlock_irq(&cmd->t_state_lock);

		if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
			u8 ua_asc = 0, ua_ascq = 0;

			core_scsi3_ua_clear_for_request_sense(cmd,
					&ua_asc, &ua_ascq);
		}

		INIT_WORK(&cmd->work, target_complete_ok_work);
		queue_work(target_completion_wq, &cmd->work);
		return 0;
	}

	if (set_counts) {
		atomic_inc(&cmd->t_fe_count);
		atomic_inc(&cmd->t_se_count);
	}

	cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi);
	atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num);
	atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num);

	/*
	 * For WRITEs, let the fabric know its buffer is ready.
	 * This WRITE struct se_cmd (and all of its associated struct se_task's)
	 * will be added to the struct se_device execution queue after its WRITE
	 * data has arrived. (ie: It gets handled by the transport processing
	 * thread a second time)
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		transport_add_tasks_to_state_queue(cmd);
		return transport_generic_write_pending(cmd);
	}
	/*
	 * Everything else but a WRITE, add the struct se_cmd's struct se_task's
	 * to the execution queue.
	 */
	transport_execute_tasks(cmd);
	return 0;

out_fail:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	return -EINVAL;
}
EXPORT_SYMBOL(transport_generic_new_cmd);
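
/*
 * Sketch of the intended submission flow from a fabric dispatch path
 * (illustrative; error handling trimmed):
 *
 *	ret = transport_generic_new_cmd(se_cmd);
 *	if (ret < 0)
 *		transport_generic_request_failure(se_cmd);
 *
 * For a READ this queues execution immediately; for a WRITE it returns
 * after ->write_pending() and the command executes once the fabric
 * delivers the data (see transport_generic_process_write() below).
 */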

/*	transport_generic_process_write():
 *
 *
 */
void transport_generic_process_write(struct se_cmd *cmd)
{
	transport_execute_tasks(cmd);
}
EXPORT_SYMBOL(transport_generic_process_write);

static void transport_write_pending_qf(struct se_cmd *cmd)
{
	int ret;

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM) {
		pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
			 cmd);
		transport_handle_queue_full(cmd, cmd->se_dev);
	}
}

static int transport_generic_write_pending(struct se_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->t_state = TRANSPORT_WRITE_PENDING;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * Clear the se_cmd for WRITE_PENDING status in order to set
	 * CMD_T_ACTIVE so that transport_generic_handle_data can be called
	 * from HW target mode interrupt code.  This is safe to be called
	 * with transport_off=1 before the cmd->se_tfo->write_pending
	 * because the se_cmd->se_lun pointer is not being cleared.
	 */
	transport_cmd_check_stop(cmd, 1, 0);

	/*
	 * Call the fabric write_pending function here to let the
	 * frontend know that WRITE buffers are ready.
	 */
	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;
	else if (ret < 0)
		return ret;

	return 1;

queue_full:
	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
	transport_handle_queue_full(cmd, cmd->se_dev);
	return 0;
}

void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
			transport_wait_for_tasks(cmd);

		transport_release_cmd(cmd);
	} else {
		if (wait_for_tasks)
			transport_wait_for_tasks(cmd);

		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);

		if (cmd->se_lun)
			transport_lun_remove_cmd(cmd);

		transport_free_dev_tasks(cmd);

		transport_put_cmd(cmd);
	}
}
EXPORT_SYMBOL(transport_generic_free_cmd);

/* target_get_sess_cmd - Add command to active ->sess_cmd_list
 * @se_sess:	session to reference
 * @se_cmd:	command descriptor to add
 * @ack_kref:	Signal that fabric will perform an ack target_put_sess_cmd()
 */
void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
			bool ack_kref)
{
	unsigned long flags;

	kref_init(&se_cmd->cmd_kref);
	/*
	 * Add a second kref if the fabric caller is expecting to handle
	 * fabric acknowledgement that requires two target_put_sess_cmd()
	 * invocations before se_cmd descriptor release.
	 */
	if (ack_kref) {
		kref_get(&se_cmd->cmd_kref);
		se_cmd->se_cmd_flags |= SCF_ACK_KREF;
	}

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
	se_cmd->check_release = 1;
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_get_sess_cmd);

static void target_release_cmd_kref(struct kref *kref)
{
	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
	struct se_session *se_sess = se_cmd->se_sess;
	unsigned long flags;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	if (list_empty(&se_cmd->se_cmd_list)) {
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		se_cmd->se_tfo->release_cmd(se_cmd);
		return;
	}
	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		complete(&se_cmd->cmd_wait_comp);
		return;
	}
	list_del(&se_cmd->se_cmd_list);
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

	se_cmd->se_tfo->release_cmd(se_cmd);
}

/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
 * @se_sess:	session to reference
 * @se_cmd:	command descriptor to drop
 */
int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
{
	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
}
EXPORT_SYMBOL(target_put_sess_cmd);
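
/*
 * Reference pairing sketch (illustrative): with ack_kref == true the
 * descriptor holds two krefs, so both the fabric ack path and the final
 * release path must each drop one.
 *
 *	target_get_sess_cmd(se_sess, se_cmd, true);
 *	...
 *	target_put_sess_cmd(se_sess, se_cmd);	(fabric ack delivered)
 *	target_put_sess_cmd(se_sess, se_cmd);	(final put, frees se_cmd)
 *
 * The last put invokes target_release_cmd_kref() above.
 */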

/* target_splice_sess_cmd_list - Split active cmds into sess_wait_list
 * @se_sess:	session to split
 */
void target_splice_sess_cmd_list(struct se_session *se_sess)
{
	struct se_cmd *se_cmd;
	unsigned long flags;

	WARN_ON(!list_empty(&se_sess->sess_wait_list));
	INIT_LIST_HEAD(&se_sess->sess_wait_list);

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	se_sess->sess_tearing_down = 1;

	list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);

	list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
		se_cmd->cmd_wait_set = 1;

	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_splice_sess_cmd_list);

/* target_wait_for_sess_cmds - Wait for outstanding descriptors
 * @se_sess:    session to wait for active I/O
 * @wait_for_tasks:	Make extra transport_wait_for_tasks call
 */
void target_wait_for_sess_cmds(
	struct se_session *se_sess,
	int wait_for_tasks)
{
	struct se_cmd *se_cmd, *tmp_cmd;
	bool rc = false;

	list_for_each_entry_safe(se_cmd, tmp_cmd,
				&se_sess->sess_wait_list, se_cmd_list) {
		list_del(&se_cmd->se_cmd_list);

		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
			" %d\n", se_cmd, se_cmd->t_state,
			se_cmd->se_tfo->get_cmd_state(se_cmd));

		if (wait_for_tasks) {
			pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));

			rc = transport_wait_for_tasks(se_cmd);

			pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));
		}

		if (!rc) {
			wait_for_completion(&se_cmd->cmd_wait_comp);
			pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));
		}

		se_cmd->se_tfo->release_cmd(se_cmd);
	}
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);
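
/*
 * Session shutdown sketch for a fabric driver (illustrative ordering,
 * assuming the usual session deregistration step follows):
 *
 *	target_splice_sess_cmd_list(se_sess);
 *	target_wait_for_sess_cmds(se_sess, 0);
 *	transport_deregister_session(se_sess);
 *
 * The splice must happen first so commands arriving afterwards are not
 * waited on; each spliced descriptor is released via ->release_cmd().
 */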

/*	transport_lun_wait_for_tasks():
 *
 *	Called from ConfigFS context to stop the passed struct se_cmd to allow
 *	a struct se_lun to be successfully shut down.
 */
static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
{
	unsigned long flags;
	int ret;
	/*
	 * If the frontend has already requested this struct se_cmd to
	 * be stopped, we can safely ignore this struct se_cmd.
	 */
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_STOP) {
		cmd->transport_state &= ~CMD_T_LUN_STOP;

		pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
			 cmd->se_tfo->get_task_tag(cmd));
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		transport_cmd_check_stop(cmd, 1, 0);
		return -EPERM;
	}
	cmd->transport_state |= CMD_T_LUN_FE_STOP;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);

	ret = transport_stop_tasks_for_cmd(cmd);

	pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
			" %d\n", cmd, cmd->t_task_list_num, ret);
	if (!ret) {
		pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
				cmd->se_tfo->get_task_tag(cmd));
		wait_for_completion(&cmd->transport_lun_stop_comp);
		pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
				cmd->se_tfo->get_task_tag(cmd));
	}
	transport_remove_cmd_from_queue(cmd);

	return 0;
}

static void __transport_clear_lun_from_sessions(struct se_lun *lun)
{
	struct se_cmd *cmd = NULL;
	unsigned long lun_flags, cmd_flags;
	/*
	 * Do exception processing and return CHECK_CONDITION status to the
	 * Initiator Port.
	 */
	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	while (!list_empty(&lun->lun_cmd_list)) {
		cmd = list_first_entry(&lun->lun_cmd_list,
		       struct se_cmd, se_lun_node);
		list_del_init(&cmd->se_lun_node);

		/*
		 * This will notify iscsi_target_transport.c:
		 * transport_cmd_check_stop() that a LUN shutdown is in
		 * progress for the iscsi_cmd_t.
		 */
		spin_lock(&cmd->t_state_lock);
		pr_debug("SE_LUN[%d] - Setting cmd->transport"
			"_lun_stop for ITT: 0x%08x\n",
			cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));
		cmd->transport_state |= CMD_T_LUN_STOP;
		spin_unlock(&cmd->t_state_lock);

		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);

		if (!cmd->se_lun) {
			pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
				cmd->se_tfo->get_task_tag(cmd),
				cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
			BUG();
		}
		/*
		 * If the Storage engine still owns the iscsi_cmd_t, determine
		 * and/or stop its context.
		 */
		pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
			"_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));

		if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}

		pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
			"_wait_for_tasks(): SUCCESS\n",
			cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));

		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
		if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
			spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
			goto check_cond;
		}
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		transport_all_task_dev_remove_state(cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);

		transport_free_dev_tasks(cmd);
		/*
		 * The Storage engine stopped this struct se_cmd before it was
		 * sent to the fabric frontend for delivery back to the
		 * Initiator Node.  Return this SCSI CDB back with a
		 * CHECK_CONDITION status.
		 */
check_cond:
		transport_send_check_condition_and_sense(cmd,
				TCM_NON_EXISTENT_LUN, 0);
		/*
		 * If the fabric frontend is waiting for this iscsi_cmd_t to
		 * be released, notify the waiting thread now that LU has
		 * finished accessing it.
		 */
		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
		if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
			pr_debug("SE_LUN[%d] - Detected FE stop for"
				" struct se_cmd: %p ITT: 0x%08x\n",
				lun->unpacked_lun,
				cmd, cmd->se_tfo->get_task_tag(cmd));

			spin_unlock_irqrestore(&cmd->t_state_lock,
					cmd_flags);
			transport_cmd_check_stop(cmd, 1, 0);
			complete(&cmd->transport_lun_fe_stop_comp);
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}
		pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
			lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));

		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	}
	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
}

static int transport_clear_lun_thread(void *p)
{
	struct se_lun *lun = p;

	__transport_clear_lun_from_sessions(lun);
	complete(&lun->lun_shutdown_comp);

	return 0;
}

int transport_clear_lun_from_sessions(struct se_lun *lun)
{
	struct task_struct *kt;

	kt = kthread_run(transport_clear_lun_thread, lun,
			"tcm_cl_%u", lun->unpacked_lun);
	if (IS_ERR(kt)) {
		pr_err("Unable to start clear_lun thread\n");
		return PTR_ERR(kt);
	}
	wait_for_completion(&lun->lun_shutdown_comp);

	return 0;
}

/**
 * transport_wait_for_tasks - wait for completion to occur
 * @cmd:	command to wait
 *
 * Called from frontend fabric context to wait for storage engine
 * to pause and/or release frontend generated struct se_cmd.
 */
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}
	/*
	 * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
	 * has been set in transport_set_supported_SAM_opcode().
	 */
	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}
	/*
	 * If we are already stopped due to an external event (ie: LUN shutdown)
	 * sleep until the connection can have the passed struct se_cmd back.
	 * The cmd->transport_lun_stopped_sem will be upped by
	 * transport_clear_lun_from_sessions() once the ConfigFS context caller
	 * has completed its operation on the struct se_cmd.
	 */
	if (cmd->transport_state & CMD_T_LUN_STOP) {
		pr_debug("wait_for_tasks: Stopping"
			" wait_for_completion(&cmd->t_tasktransport_lun_fe"
			"_stop_comp); for ITT: 0x%08x\n",
			cmd->se_tfo->get_task_tag(cmd));
		/*
		 * There is a special case for WRITES where a FE exception +
		 * LUN shutdown means ConfigFS context is still sleeping on
		 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
		 * We go ahead and up transport_lun_stop_comp just to be sure
		 * here.
		 */
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->transport_lun_stop_comp);
		wait_for_completion(&cmd->transport_lun_fe_stop_comp);
		spin_lock_irqsave(&cmd->t_state_lock, flags);

		transport_all_task_dev_remove_state(cmd);
		/*
		 * At this point, the frontend who was the originator of this
		 * struct se_cmd, now owns the structure and can be released through
		 * normal means below.
		 */
		pr_debug("wait_for_tasks: Stopped"
			" wait_for_completion(&cmd->t_tasktransport_lun_fe_"
			"stop_comp); for ITT: 0x%08x\n",
			cmd->se_tfo->get_task_tag(cmd));

		cmd->transport_state &= ~CMD_T_LUN_STOP;
	}

	if (!(cmd->transport_state & CMD_T_ACTIVE)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	cmd->transport_state |= CMD_T_STOP;

	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
		" i_state: %d, t_state: %d, CMD_T_STOP\n",
		cmd, cmd->se_tfo->get_task_tag(cmd),
		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);

	wait_for_completion(&cmd->t_transport_stop_comp);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);

	pr_debug("wait_for_tasks: Stopped wait_for_completion("
		"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
		cmd->se_tfo->get_task_tag(cmd));

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return true;
}
EXPORT_SYMBOL(transport_wait_for_tasks);
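
/*
 * Typical use (sketch): a fabric tearing down an in-flight descriptor can
 * quiesce it before release, as transport_generic_free_cmd() does above:
 *
 *	if (transport_wait_for_tasks(cmd))
 *		pr_debug("cmd %p quiesced before release\n", cmd);
 *
 * A false return means the command was never active (or is not a LUN/TMR
 * command) and there was nothing to wait for.
 */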

static int transport_get_sense_codes(
	struct se_cmd *cmd,
	u8 *asc,
	u8 *ascq)
{
	*asc = cmd->scsi_asc;
	*ascq = cmd->scsi_ascq;

	return 0;
}

static int transport_set_sense_codes(
	struct se_cmd *cmd,
	u8 asc,
	u8 ascq)
{
	cmd->scsi_asc = asc;
	cmd->scsi_ascq = ascq;

	return 0;
}
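
/*
 * Reading aid for the sense formatting below (sketch, using
 * TCM_NON_EXISTENT_LUN as the example; offset is the fabric header skip
 * returned by ->set_fabric_sense_len()):
 *
 *	buffer[offset]				= 0x70 (current error)
 *	buffer[offset+SPC_SENSE_KEY_OFFSET]	= ILLEGAL_REQUEST
 *	buffer[offset+SPC_ADD_SENSE_LEN_OFFSET]	= 10
 *	buffer[offset+SPC_ASC_KEY_OFFSET]	= 0x25 (LUN not supported)
 */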

int transport_send_check_condition_and_sense(
	struct se_cmd *cmd,
	u8 reason,
	int from_transport)
{
	unsigned char *buffer = cmd->sense_buffer;
	unsigned long flags;
	int offset;
	u8 asc = 0, ascq = 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}
	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (!reason && from_transport)
		goto after_reason;

	if (!from_transport)
		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
	/*
	 * Data Segment and SenseLength of the fabric response PDU.
	 *
	 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
	 * from include/scsi/scsi_cmnd.h
	 */
	offset = cmd->se_tfo->set_fabric_sense_len(cmd,
				TRANSPORT_SENSE_BUFFER);
	/*
	 * Actual SENSE DATA, see SPC-3 7.23.2  SPC_SENSE_KEY_OFFSET uses
	 * SENSE KEY values from include/scsi/scsi.h
	 */
	switch (reason) {
	case TCM_NON_EXISTENT_LUN:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT NOT SUPPORTED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
		break;
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_SECTOR_COUNT_TOO_MANY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID COMMAND OPERATION CODE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
		break;
	case TCM_UNKNOWN_MODE_PAGE:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_CHECK_CONDITION_ABORT_CMD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* BUS DEVICE RESET FUNCTION OCCURRED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
		break;
	case TCM_INCORRECT_AMOUNT_OF_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* NOT ENOUGH UNSOLICITED DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
		break;
	case TCM_INVALID_CDB_FIELD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_INVALID_PARAMETER_LIST:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN PARAMETER LIST */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
		break;
	case TCM_UNEXPECTED_UNSOLICITED_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* UNEXPECTED_UNSOLICITED_DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
		break;
	case TCM_SERVICE_CRC_ERROR:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* PROTOCOL SERVICE CRC ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
		/* N/A */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
		break;
	case TCM_SNACK_REJECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* READ ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
		/* FAILED RETRANSMISSION REQUEST */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
		break;
	case TCM_WRITE_PROTECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* DATA PROTECT */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
		/* WRITE PROTECTED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
		break;
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* UNIT ATTENTION */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_CHECK_CONDITION_NOT_READY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* Not Ready */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
		transport_get_sense_codes(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	default:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT COMMUNICATION FAILURE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
		break;
	}
	/*
	 * This code uses linux/include/scsi/scsi.h SAM status codes!
	 */
	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	/*
	 * Automatically padded, this value is encoded in the fabric's
	 * data_length response PDU containing the SCSI defined sense data.
	 */
	cmd->scsi_sense_length  = TRANSPORT_SENSE_BUFFER + offset;

after_reason:
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);

int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
	int ret = 0;

	if (cmd->transport_state & CMD_T_ABORTED) {
		if (!send_status ||
		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
			return 1;
#if 0
		pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
			" status for CDB: 0x%02x ITT: 0x%08x\n",
			cmd->t_task_cdb[0],
			cmd->se_tfo->get_task_tag(cmd));
#endif
		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
		cmd->se_tfo->queue_status(cmd);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL(transport_check_aborted_status);

void transport_send_task_abort(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * If there are still expected incoming fabric WRITEs, we wait
	 * until they have completed before sending a TASK_ABORTED
	 * response.  This response with TASK_ABORTED status will be
	 * queued back to fabric module by transport_check_aborted_status().
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
			cmd->transport_state |= CMD_T_ABORTED;
			smp_mb__after_atomic_inc();
		}
	}
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
#if 0
	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
		" ITT: 0x%08x\n", cmd->t_task_cdb[0],
		cmd->se_tfo->get_task_tag(cmd));
#endif
	cmd->se_tfo->queue_status(cmd);
}

static int transport_generic_do_tmr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_tmr_req *tmr = cmd->se_tmr_req;
	int ret;

	switch (tmr->function) {
	case TMR_ABORT_TASK:
		core_tmr_abort_task(dev, tmr, cmd->se_sess);
		break;
	case TMR_ABORT_TASK_SET:
	case TMR_CLEAR_ACA:
	case TMR_CLEAR_TASK_SET:
		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		break;
	case TMR_LUN_RESET:
		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
					 TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_WARM_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_COLD_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	default:
		pr_err("Unknown TMR function: 0x%02x.\n",
				tmr->function);
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	}

	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
	cmd->se_tfo->queue_tm_rsp(cmd);

	transport_cmd_check_stop_to_fabric(cmd);
	return 0;
}

/*	transport_processing_thread():
 *
 *
 */
static int transport_processing_thread(void *param)
{
	int ret;
	struct se_cmd *cmd;
	struct se_device *dev = param;

	while (!kthread_should_stop()) {
		ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
				atomic_read(&dev->dev_queue_obj.queue_cnt) ||
				kthread_should_stop());
		if (ret < 0)
			goto out;

get_cmd:
		cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
		if (!cmd)
			continue;

		switch (cmd->t_state) {
		case TRANSPORT_NEW_CMD:
			BUG();
			break;
		case TRANSPORT_NEW_CMD_MAP:
			if (!cmd->se_tfo->new_cmd_map) {
				pr_err("cmd->se_tfo->new_cmd_map is"
					" NULL for TRANSPORT_NEW_CMD_MAP\n");
				BUG();
			}
			ret = cmd->se_tfo->new_cmd_map(cmd);
			if (ret < 0) {
				transport_generic_request_failure(cmd);
				break;
			}
			ret = transport_generic_new_cmd(cmd);
			if (ret < 0) {
				transport_generic_request_failure(cmd);
				break;
			}
			break;
		case TRANSPORT_PROCESS_WRITE:
			transport_generic_process_write(cmd);
			break;
		case TRANSPORT_PROCESS_TMR:
			transport_generic_do_tmr(cmd);
			break;
		case TRANSPORT_COMPLETE_QF_WP:
			transport_write_pending_qf(cmd);
			break;
		case TRANSPORT_COMPLETE_QF_OK:
			transport_complete_qf(cmd);
			break;
		default:
			pr_err("Unknown t_state: %d  for ITT: 0x%08x "
				"i_state: %d on SE LUN: %u\n",
				cmd->t_state,
				cmd->se_tfo->get_task_tag(cmd),
				cmd->se_tfo->get_cmd_state(cmd),
				cmd->se_lun->unpacked_lun);
			BUG();
		}

		goto get_cmd;
	}

out:
	WARN_ON(!list_empty(&dev->state_task_list));
	WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
	dev->process_thread = NULL;
	return 0;
}