qla_target.c 187.5 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12
/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
13
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_def.h"
#include "qla_target.h"

45 46 47 48 49
static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
		"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");

50 51 52 53 54 55 56 57
static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and then on disabling target mode "
	"enabled back; "
	"\"disabled\" - initiator mode will never be enabled; "
58 59
	"\"dual\" - Initiator Modes will be enabled. Target Mode can be activated "
	"when ready "
60 61
	"\"enabled\" (default) - initiator mode will always stay enabled.");

62
static int ql_dm_tgt_ex_pct = 0;
63 64 65 66 67 68
module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql_dm_tgt_ex_pct,
	"For Dual Mode (qlini_mode=dual), this parameter determines "
	"the percentage of exchanges/cmds FW will allocate resources "
	"for Target mode.");

69
int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
70

71 72
static int temp_sam_status = SAM_STAT_BUSY;

73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91
/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE      0   /* simple task attribute */
#define FCP_PTA_HEADQ       1   /* head of queue task attribute */
#define FCP_PTA_ORDERED     2   /* ordered task attribute */
92
#define FCP_PTA_ACA         4   /* auto. contingent allegiance */
93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111
#define FCP_PTA_MASK        7   /* mask for task attribute field */
#define FCP_PRI_SHIFT       3   /* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK  0x80        /* reserved bits in priority field */

/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation on the time when
 * those functions are called:
 *
 *   - Either context is IRQ and only IRQ handler can modify HW data,
 *     including rings related fields,
 *
 *   - Or access to target mode variables from struct qla_tgt doesn't
 *     cross those functions boundaries, except tgt_stop, which
 *     additionally protected by irq_cmd_count.
 */
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
112
	struct atio_from_isp *pkt, uint8_t);
113
static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
114
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
115 116
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
117
	*cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
118 119
static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd);
120 121
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull);
122
static void qlt_disable_vha(struct scsi_qla_host *vha);
123
static void qlt_clear_tgt_db(struct qla_tgt *tgt);
124 125 126 127
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
128 129
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked);
130 131 132
static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
	fc_port_t *fcport, bool local);
void qlt_unreg_sess(struct fc_port *sess);
133 134 135
static void qlt_24xx_handle_abts(struct scsi_qla_host *,
	struct abts_recv_from_24xx *);

136 137 138 139
/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
140
static struct kmem_cache *qla_tgt_plogi_cachep;
141 142 143 144 145
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);

146 147 148 149 150 151 152 153 154 155 156 157 158 159
static const char *prot_op_str(u32 prot_op)
{
	switch (prot_op) {
	case TARGET_PROT_NORMAL:	return "NORMAL";
	case TARGET_PROT_DIN_INSERT:	return "DIN_INSERT";
	case TARGET_PROT_DOUT_INSERT:	return "DOUT_INSERT";
	case TARGET_PROT_DIN_STRIP:	return "DIN_STRIP";
	case TARGET_PROT_DOUT_STRIP:	return "DOUT_STRIP";
	case TARGET_PROT_DIN_PASS:	return "DIN_PASS";
	case TARGET_PROT_DOUT_PASS:	return "DOUT_PASS";
	default:			return "UNKNOWN";
	}
}

160 161 162 163 164 165 166 167 168 169
/* This API intentionally takes dest as a parameter, rather than returning
 * int value to avoid caller forgetting to issue wmb() after the store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
	*dest = atomic_inc_return(&base_vha->generation_tick);
	/* memory barrier */
	wmb();
}

170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189
/* Might release hw lock, then reaquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);
		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}

static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
	uint8_t *d_id)
{
190 191
	struct scsi_qla_host *host;
	uint32_t key = 0;
192

193 194
	if ((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) &&
	    (vha->d_id.b.al_pa == d_id[2]))
195 196
		return vha;

197 198 199
	key  = (uint32_t)d_id[0] << 16;
	key |= (uint32_t)d_id[1] <<  8;
	key |= (uint32_t)d_id[2];
200

201 202
	host = btree_lookup32(&vha->hw->tgt.host_map, key);
	if (!host)
203 204
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
		    "Unable to find host %06x\n", key);
205 206

	return host;
207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224
}

static inline
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}

225 226 227 228 229 230 231
static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);

	vha->hw->tgt.num_pend_cmds++;
232 233
	if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
		vha->qla_stats.stat_max_pend_cmds =
234 235 236 237 238 239 240 241 242 243 244 245
			vha->hw->tgt.num_pend_cmds;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}
static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	vha->hw->tgt.num_pend_cmds--;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

246 247 248 249 250 251 252 253 254

static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
	struct atio_from_isp *atio,	uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (tgt->tgt_stop) {
255 256 257
		ql_dbg(ql_dbg_async, vha, 0x502c,
		    "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped",
		    vha->vp_idx);
258 259 260 261
		goto out_term;
	}

	u = kzalloc(sizeof(*u), GFP_ATOMIC);
262
	if (u == NULL)
263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293
		goto out_term;

	u->vha = vha;
	memcpy(&u->atio, atio, sizeof(*atio));
	INIT_LIST_HEAD(&u->cmd_list);

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	schedule_delayed_work(&vha->unknown_atio_work, 1);

out:
	return;

out_term:
	qlt_send_term_exchange(vha, NULL, atio, ha_locked, 0);
	goto out;
}

static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
	uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u, *t;
	scsi_qla_host_t *host;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	uint8_t queued = 0;

	list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
		if (u->aborted) {
294 295
			ql_dbg(ql_dbg_async, vha, 0x502e,
			    "Freeing unknown %s %p, because of Abort\n",
296 297 298 299 300 301 302 303
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha, NULL, &u->atio,
			    ha_locked, 0);
			goto abort;
		}

		host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
		if (host != NULL) {
304 305
			ql_dbg(ql_dbg_async, vha, 0x502f,
			    "Requeuing unknown ATIO_TYPE7 %p\n", u);
306 307
			qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
		} else if (tgt->tgt_stop) {
308 309 310
			ql_dbg(ql_dbg_async, vha, 0x503a,
			    "Freeing unknown %s %p, because tgt is being stopped\n",
			    "ATIO_TYPE7", u);
311 312 313
			qlt_send_term_exchange(vha, NULL, &u->atio,
			    ha_locked, 0);
		} else {
314 315
			ql_dbg(ql_dbg_async, vha, 0x503d,
			    "Reschedule u %p, vha %p, host %p\n", u, vha, host);
316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339
			if (!queued) {
				queued = 1;
				schedule_delayed_work(&vha->unknown_atio_work,
				    1);
			}
			continue;
		}

abort:
		spin_lock_irqsave(&vha->cmd_list_lock, flags);
		list_del(&u->cmd_list);
		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
		kfree(u);
	}
}

void qlt_unknown_atio_work_fn(struct work_struct *work)
{
	struct scsi_qla_host *vha = container_of(to_delayed_work(work),
	    struct scsi_qla_host, unknown_atio_work);

	qlt_try_to_dequeue_unknown_atios(vha, 0);
}

340
static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
341
	struct atio_from_isp *atio, uint8_t ha_locked)
342
{
343 344 345 346 347
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
		"%s: qla_target(%d): type %x ox_id %04x\n",
		__func__, vha->vp_idx, atio->u.raw.entry_type,
		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

348 349 350 351 352 353 354 355 356 357 358 359
	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id[0],
			    atio->u.isp24.fcp_hdr.d_id[1],
			    atio->u.isp24.fcp_hdr.d_id[2]);
360 361 362


			qlt_queue_unknown_atio(vha, atio, ha_locked);
363 364
			break;
		}
365 366 367
		if (unlikely(!list_empty(&vha->unknown_atio_list)))
			qlt_try_to_dequeue_unknown_atios(vha, ha_locked);

368
		qlt_24xx_atio_pkt(host, atio, ha_locked);
369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
391
		qlt_24xx_atio_pkt(host, atio, ha_locked);
392 393 394
		break;
	}

395 396 397 398 399 400 401 402 403 404 405
	case VP_RPT_ID_IOCB_TYPE:
		qla24xx_report_id_acquisition(vha,
			(struct vp_rpt_id_entry_24xx *)atio);
		break;

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
			(struct abts_recv_from_24xx *)atio;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
			entry->vp_index);
406 407
		unsigned long flags;

408
		if (unlikely(!host)) {
409
			ql_dbg(ql_dbg_tgt, vha, 0xe00a,
410 411 412 413 414
			    "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
415 416 417 418 419
		if (!ha_locked)
			spin_lock_irqsave(&host->hw->hardware_lock, flags);
		qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
420 421 422 423 424
		break;
	}

	/* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */

425 426 427 428 429 430 431
	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

432
	return false;
433 434 435 436 437
}

void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
{
	switch (pkt->entry_type) {
438 439 440 441
	case CTIO_CRC2:
		ql_dbg(ql_dbg_tgt, vha, 0xe073,
			"qla_target(%d):%s: CRC2 Response pkt\n",
			vha->vp_idx, __func__);
442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (0xFF != entry->u.isp24.vp_index) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	default:
		qlt_response_pkt(vha, pkt);
		break;
	}

}

539 540 541
/*
 * All qlt_plogi_ack_t operations are protected by hardware_lock
 */
542 543 544 545 546 547 548 549 550 551 552 553 554 555 556
static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	struct qla_work_evt *e;
	e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.nack.fcport = fcport;
	e->u.nack.type = type;
	memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp));
	return qla2x00_post_work(vha, e);
}

static
557
void qla2x00_async_nack_sp_done(void *s, int res)
558 559
{
	struct srb *sp = (struct srb *)s;
560
	struct scsi_qla_host *vha = sp->vha;
561 562
	unsigned long flags;

563 564 565
	ql_dbg(ql_dbg_disc, vha, 0x20f2,
	    "Async done-%s res %x %8phC  type %d\n",
	    sp->name, res, sp->fcport->port_name, sp->type);
566 567 568 569 570 571 572 573 574 575

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	sp->fcport->flags &= ~FCF_ASYNC_SENT;
	sp->fcport->chip_reset = vha->hw->chip_reset;

	switch (sp->type) {
	case SRB_NACK_PLOGI:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
		sp->fcport->logout_on_delete = 1;
576
		sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
577 578 579 580 581 582 583 584 585 586 587 588 589 590
		break;

	case SRB_NACK_PRLI:
		sp->fcport->fw_login_state = DSC_LS_PRLI_COMP;
		sp->fcport->deleted = 0;

		if (!sp->fcport->login_succ &&
		    !IS_SW_RESV_ADDR(sp->fcport->d_id)) {
			sp->fcport->login_succ = 1;

			vha->fcport_count++;

			if (!IS_IIDMA_CAPABLE(vha->hw) ||
			    !vha->hw->flags.gpsc_supported) {
591 592 593 594 595
				ql_dbg(ql_dbg_disc, vha, 0x20f3,
				    "%s %d %8phC post upd_fcport fcp_cnt %d\n",
				    __func__, __LINE__,
				    sp->fcport->port_name,
				    vha->fcport_count);
596 597 598

				qla24xx_post_upd_fcport_work(vha, sp->fcport);
			} else {
599 600 601 602 603
				ql_dbg(ql_dbg_disc, vha, 0x20f5,
				    "%s %d %8phC post gpsc fcp_cnt %d\n",
				    __func__, __LINE__,
				    sp->fcport->port_name,
				    vha->fcport_count);
604 605 606 607 608 609 610 611 612 613 614 615 616 617

				qla24xx_post_gpsc_work(vha, sp->fcport);
			}
		}
		break;

	case SRB_NACK_LOGO:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
		qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE);
		break;
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

618
	sp->free(sp);
619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635
}

int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	char *c = NULL;

	fcport->flags |= FCF_ASYNC_SENT;
	switch (type) {
	case SRB_NACK_PLOGI:
		fcport->fw_login_state = DSC_LS_PLOGI_PEND;
		c = "PLOGI";
		break;
	case SRB_NACK_PRLI:
		fcport->fw_login_state = DSC_LS_PRLI_PEND;
636
		fcport->deleted = 0;
637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661
		c = "PRLI";
		break;
	case SRB_NACK_LOGO:
		fcport->fw_login_state = DSC_LS_LOGO_PEND;
		c = "LOGO";
		break;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	sp->type = type;
	sp->name = "nack";

	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);

	sp->u.iocb_cmd.u.nack.ntfy = ntfy;

	sp->done = qla2x00_async_nack_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

662 663 664
	ql_dbg(ql_dbg_disc, vha, 0x20f4,
	    "Async-%s %8phC hndl %x %s\n",
	    sp->name, fcport->port_name, sp->handle, c);
665 666 667 668

	return rval;

done_free_sp:
669
	sp->free(sp);
670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
	fc_port_t *t;
	unsigned long flags;

	switch (e->u.nack.type) {
	case SRB_NACK_PRLI:
		mutex_lock(&vha->vha_tgt.tgt_mutex);
		t = qlt_create_sess(vha, e->u.nack.fcport, 0);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		if (t) {
686
			ql_log(ql_log_info, vha, 0xd034,
687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754
			    "%s create sess success %p", __func__, t);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			/* create sess has an extra kref */
			vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		}
		break;
	}
	qla24xx_async_notify_ack(vha, e->u.nack.fcport,
	    (struct imm_ntfy_from_isp*)e->u.nack.iocb, e->u.nack.type);
}

void qla24xx_delete_sess_fn(struct work_struct *work)
{
	fc_port_t *fcport = container_of(work, struct fc_port, del_work);
	struct qla_hw_data *ha = fcport->vha->hw;
	unsigned long flags;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);

	if (fcport->se_sess) {
		ha->tgt.tgt_ops->shutdown_sess(fcport);
		ha->tgt.tgt_ops->put_sess(fcport);
	} else {
		qlt_unreg_sess(fcport);
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}

/*
 * Called from qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (fcport->disc_state == DSC_DELETE_PEND) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (!sess->se_sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	} else {
		if (fcport->fw_login_state == DSC_LS_PRLI_COMP) {
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
755
			ql_dbg(ql_dbg_disc, vha, 0x2107,
756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784
			    "%s: kref_get fail sess %8phC \n",
			    __func__, sess->port_name);
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
		    "qla_target(%u): %ssession for port %8phC "
		    "(loop ID %d) reappeared\n", vha->vp_idx,
		    sess->local ? "local " : "", sess->port_name, sess->loop_id);

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
		    "Reappeared sess %p\n", sess);

		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
		    fcport->loop_id,
		    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
	}

	if (sess && sess->local) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name, sess->loop_id);
		sess->local = 0;
	}
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
785 786 787 788 789 790

/*
 * This is a zero-base ref-counting solution, since hardware_lock
 * guarantees that ref_count is not modified concurrently.
 * Upon successful return content of iocb is undefined
 */
791
static struct qlt_plogi_ack_t *
792 793 794
qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
		       struct imm_ntfy_from_isp *iocb)
{
795
	struct qlt_plogi_ack_t *pla;
796 797 798 799

	list_for_each_entry(pla, &vha->plogi_ack_list, list) {
		if (pla->id.b24 == id->b24) {
			qlt_send_term_imm_notif(vha, &pla->iocb, 1);
800
			memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
801 802 803 804 805 806 807 808 809 810 811 812
			return pla;
		}
	}

	pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
	if (!pla) {
		ql_dbg(ql_dbg_async, vha, 0x5088,
		       "qla_target(%d): Allocation of plogi_ack failed\n",
		       vha->vp_idx);
		return NULL;
	}

813
	memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
814 815 816 817 818 819
	pla->id = *id;
	list_add_tail(&pla->list, &vha->plogi_ack_list);

	return pla;
}

820
void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
821
    struct qlt_plogi_ack_t *pla)
822
{
823
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
824 825 826 827
	port_id_t port_id;
	uint16_t loop_id;
	fc_port_t *fcport = pla->fcport;

828 829 830 831 832 833
	BUG_ON(!pla->ref_count);
	pla->ref_count--;

	if (pla->ref_count)
		return;

834
	ql_dbg(ql_dbg_disc, vha, 0x5089,
835
	    "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
836 837 838 839 840
	    " exch %#x ox_id %#x\n", iocb->u.isp24.port_name,
	    iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1],
	    iocb->u.isp24.port_id[0],
	    le16_to_cpu(iocb->u.isp24.nport_handle),
	    iocb->u.isp24.exchange_address, iocb->ox_id);
841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area   = iocb->u.isp24.port_id[1];
	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	fcport->loop_id = loop_id;
	fcport->d_id = port_id;
	qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
	}
859 860 861 862 863

	list_del(&pla->list);
	kmem_cache_free(qla_tgt_plogi_cachep, pla);
}

864
void
865 866
qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
    struct fc_port *sess, enum qlt_plogi_link_t link)
867
{
868
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
869 870 871
	/* Inc ref_count first because link might already be pointing at pla */
	pla->ref_count++;

872 873 874 875 876 877 878 879
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
		"Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
		" s_id %02x:%02x:%02x, ref=%d pla %p link %d\n",
		sess, link, sess->port_name,
		iocb->u.isp24.port_name, iocb->u.isp24.port_id[2],
		iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
		pla->ref_count, pla, link);

880 881 882
	if (sess->plogi_link[link])
		qlt_plogi_ack_unref(vha, sess->plogi_link[link]);

883 884
	if (link == QLT_PLOGI_LINK_SAME_WWN)
		pla->fcport = sess;
885 886 887 888

	sess->plogi_link[link] = pla;
}

889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928
typedef struct {
	/* These fields must be initialized by the caller */
	port_id_t id;
	/*
	 * number of cmds dropped while we were waiting for
	 * initiator to ack LOGO initialize to 1 if LOGO is
	 * triggered by a command, otherwise, to 0
	 */
	int cmd_count;

	/* These fields are used by callee */
	struct list_head list;
} qlt_port_logo_t;

static void
qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
{
	qlt_port_logo_t *tmp;
	int res;

	mutex_lock(&vha->vha_tgt.tgt_mutex);

	list_for_each_entry(tmp, &vha->logo_list, list) {
		if (tmp->id.b24 == logo->id.b24) {
			tmp->cmd_count += logo->cmd_count;
			mutex_unlock(&vha->vha_tgt.tgt_mutex);
			return;
		}
	}

	list_add_tail(&logo->list, &vha->logo_list);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	list_del(&logo->list);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

929 930 931 932
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
	    "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
	    logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
	    logo->cmd_count, res);
933 934
}

935 936
static void qlt_free_session_done(struct work_struct *work)
{
937
	struct fc_port *sess = container_of(work, struct fc_port,
938 939 940 941
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
942 943
	unsigned long flags;
	bool logout_started = false;
944
	struct event_arg ea;
945
	scsi_qla_host_t *base_vha;
946 947 948

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
		"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
949
		" s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
950
		__func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
951
		sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa,
952
		sess->logout_on_delete, sess->keep_nport_handle,
953
		sess->send_els_logo);
954

955

956
	if (!IS_SW_RESV_ADDR(sess->d_id)) {
957 958
		if (sess->send_els_logo) {
			qlt_port_logo_t logo;
959

960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975
			logo.id = sess->d_id;
			logo.cmd_count = 0;
			qlt_send_first_logo(vha, &logo);
		}

		if (sess->logout_on_delete) {
			int rc;

			rc = qla2x00_post_async_logout_work(vha, sess, NULL);
			if (rc != QLA_SUCCESS)
				ql_log(ql_log_warn, vha, 0xf085,
				    "Schedule logo failed sess %p rc %d\n",
				    sess, rc);
			else
				logout_started = true;
		}
976
	}
977

978 979 980 981 982 983
	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

984 985 986 987 988 989 990 991 992 993 994 995 996
	if (logout_started) {
		bool traced = false;

		while (!ACCESS_ONCE(sess->logout_completed)) {
			if (!traced) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
					"%s: waiting for sess %p logout\n",
					__func__, sess);
				traced = true;
			}
			msleep(100);
		}

997
		ql_dbg(ql_dbg_disc, vha, 0xf087,
998 999 1000 1001 1002 1003 1004
		    "%s: sess %p logout completed\n",__func__, sess);
	}

	if (sess->logo_ack_needed) {
		sess->logo_ack_needed = 0;
		qla24xx_async_notify_ack(vha, sess,
			(struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
1005 1006
	}

1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (sess->se_sess) {
		sess->se_sess = NULL;
		if (tgt && !IS_SW_RESV_ADDR(sess->d_id))
			tgt->sess_count--;
	}

	sess->disc_state = DSC_DELETED;
	sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
	sess->deleted = QLA_SESS_DELETED;
	sess->login_retry = vha->hw->login_retry_count;

	if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
		vha->fcport_count--;
		sess->login_succ = 0;
	}

	if (sess->chip_reset != sess->vha->hw->chip_reset)
		qla2x00_clear_loop_id(sess);

	if (sess->conflict) {
		sess->conflict->login_pause = 0;
		sess->conflict = NULL;
		if (!test_bit(UNLOADING, &vha->dpc_flags))
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
	}

1034
	{
1035
		struct qlt_plogi_ack_t *own =
1036
		    sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
1037
		struct qlt_plogi_ack_t *con =
1038
		    sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
1039
		struct imm_ntfy_from_isp *iocb;
1040 1041

		if (con) {
1042
			iocb = &con->iocb;
1043
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
1044 1045 1046 1047 1048 1049
				 "se_sess %p / sess %p port %8phC is gone,"
				 " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
				 sess->se_sess, sess, sess->port_name,
				 own ? "releasing own PLOGI" : "no own PLOGI pending",
				 own ? own->ref_count : -1,
				 iocb->u.isp24.port_name, con->ref_count);
1050
			qlt_plogi_ack_unref(vha, con);
1051
			sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
1052 1053 1054 1055 1056 1057 1058 1059 1060
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
			    "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
			    sess->se_sess, sess, sess->port_name,
			    own ? "releasing own PLOGI" :
			    "no own PLOGI pending",
			    own ? own->ref_count : -1);
		}

1061 1062
		if (own) {
			sess->fw_login_state = DSC_LS_PLOGI_PEND;
1063
			qlt_plogi_ack_unref(vha, own);
1064 1065
			sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		}
1066
	}
1067 1068
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

1069
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
1070 1071
	    "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
		sess, sess->port_name, vha->fcport_count);
1072

1073
	if (tgt && (tgt->sess_count == 0))
1074
		wake_up_all(&tgt->waitQ);
1075 1076 1077 1078

	if (vha->fcport_count == 0)
		wake_up_all(&vha->fcport_waitQ);

1079 1080 1081 1082
	base_vha = pci_get_drvdata(ha->pdev);
	if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags))
		return;

1083 1084 1085 1086 1087 1088
	if (!tgt || !tgt->tgt_stop) {
		memset(&ea, 0, sizeof(ea));
		ea.event = FCME_DELETE_DONE;
		ea.fcport = sess;
		qla2x00_fcport_event_handler(vha, &ea);
	}
1089 1090
}

1091
/* ha->tgt.sess_lock supposed to be held on entry */
1092
void qlt_unreg_sess(struct fc_port *sess)
1093 1094 1095
{
	struct scsi_qla_host *vha = sess->vha;

1096
	ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
1097 1098 1099
	    "%s sess %p for deletion %8phC\n",
	    __func__, sess, sess->port_name);

1100 1101
	if (sess->se_sess)
		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
1102

1103 1104
	qla2x00_mark_device_lost(vha, sess, 1, 1);

1105
	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
1106 1107 1108
	sess->disc_state = DSC_DELETE_PEND;
	sess->last_rscn_gen = sess->rscn_gen;
	sess->last_login_gen = sess->login_gen;
1109 1110 1111 1112

	INIT_WORK(&sess->free_work, qlt_free_session_done);
	schedule_work(&sess->free_work);
}
1113
EXPORT_SYMBOL(qlt_unreg_sess);
1114

1115 1116 1117
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
1118
	struct fc_port *sess = NULL;
1119 1120 1121
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
1122
	unsigned long flags;
1123 1124 1125 1126

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
		/* Global event */
1127
		atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
1128
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1129
		qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
1130
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1131
	} else {
1132
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1133
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
1134
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1135 1136 1137 1138 1139 1140 1141 1142 1143 1144
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
1145 1146
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
1147 1148
	    mcmd, loop_id);

1149
	return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
1150 1151
}

1152 1153 1154 1155 1156 1157 1158 1159 1160 1161
static void qla24xx_chk_fcp_state(struct fc_port *sess)
{
	if (sess->chip_reset != sess->vha->hw->chip_reset) {
		sess->logout_on_delete = 0;
		sess->logo_ack_needed = 0;
		sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
		sess->scan_state = 0;
	}
}

1162
/* ha->tgt.sess_lock supposed to be held on entry */
1163
void qlt_schedule_sess_for_deletion(struct fc_port *sess,
1164 1165 1166 1167
	bool immediate)
{
	struct qla_tgt *tgt = sess->tgt;

1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178
	if (sess->disc_state == DSC_DELETE_PEND)
		return;

	if (sess->disc_state == DSC_DELETED) {
		if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
			wake_up_all(&tgt->waitQ);
		if (sess->vha->fcport_count == 0)
			wake_up_all(&sess->vha->fcport_waitQ);

		if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
			!sess->plogi_link[QLT_PLOGI_LINK_CONFLICT])
1179 1180
			return;
	}
1181

1182
	sess->disc_state = DSC_DELETE_PEND;
1183

1184 1185 1186 1187 1188
	if (sess->deleted == QLA_SESS_DELETED)
		sess->logout_on_delete = 0;

	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	qla24xx_chk_fcp_state(sess);
1189

1190 1191
	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion\n", sess);
1192

1193 1194
	schedule_work(&sess->del_work);
}
1195

1196 1197 1198 1199 1200 1201 1202
void qlt_schedule_sess_for_deletion_lock(struct fc_port *sess)
{
	unsigned long flags;
	struct qla_hw_data *ha = sess->vha->hw;
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	qlt_schedule_sess_for_deletion(sess, 1);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1203 1204
}

1205
/* ha->tgt.sess_lock supposed to be held on entry */
J
Joern Engel 已提交
1206
static void qlt_clear_tgt_db(struct qla_tgt *tgt)
1207
{
1208 1209
	struct fc_port *sess;
	scsi_qla_host_t *vha = tgt->vha;
1210

1211 1212
	list_for_each_entry(sess, &vha->vp_fcports, list) {
		if (sess->se_sess)
1213
			qlt_schedule_sess_for_deletion(sess, 1);
1214
	}
1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238

	/* At this point tgt could be already dead */
}

static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list;
	char *id_iter;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
1239
	rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
1240 1241 1242 1243
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
1244
		res = -EBUSY;
1245 1246 1247 1248
		goto out_free_id_list;
	}

	id_iter = (char *)gid_list;
1249
	res = -ENOENT;
1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271
	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid = (struct gid_list_info *)id_iter;
		if ((gid->al_pa == s_id[2]) &&
		    (gid->area == s_id[1]) &&
		    (gid->domain == s_id[0])) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		id_iter += ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}

/*
 * Adds an extra ref to allow to drop hw lock after adding sess to the list.
 * Caller must put it.
 */
1272
static struct fc_port *qlt_create_sess(
1273 1274 1275 1276 1277
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
1278
	struct fc_port *sess = fcport;
1279 1280
	unsigned long flags;

1281 1282
	if (vha->vha_tgt.qla_tgt->tgt_stop)
		return NULL;
1283

1284 1285
	if (fcport->se_sess) {
		if (!kref_get_unless_zero(&sess->sess_kref)) {
1286
			ql_dbg(ql_dbg_disc, vha, 0x20f6,
1287 1288 1289
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
1290
		}
1291
		return fcport;
1292
	}
1293
	sess->tgt = vha->vha_tgt.qla_tgt;
1294
	sess->local = local;
1295

1296 1297
	/*
	 * Under normal circumstances we want to logout from firmware when
1298 1299
	 * session eventually ends and release corresponding nport handle.
	 * In the exception cases (e.g. when new PLOGI is waiting) corresponding
1300 1301
	 * code will adjust these flags as necessary.
	 */
1302 1303
	sess->logout_on_delete = 1;
	sess->keep_nport_handle = 0;
1304
	sess->logout_completed = 0;
1305

1306 1307
	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess) < 0) {
1308
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015,
1309 1310 1311 1312 1313 1314 1315 1316 1317 1318
		    "(%d) %8phC check_initiator_node_acl failed\n",
		    vha->vp_idx, fcport->port_name);
		return NULL;
	} else {
		kref_init(&fcport->sess_kref);
		/*
		 * Take an extra reference to ->sess_kref here to handle
		 * fc_port access across ->tgt.sess_lock reaquire.
		 */
		if (!kref_get_unless_zero(&sess->sess_kref)) {
1319
			ql_dbg(ql_dbg_disc, vha, 0x20f7,
1320 1321 1322 1323
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}
1324

1325 1326 1327
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (!IS_SW_RESV_ADDR(sess->d_id))
			vha->vha_tgt.qla_tgt->sess_count++;
1328

1329 1330 1331 1332 1333 1334 1335 1336
		qlt_do_generation_tick(vha, &sess->generation);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p se_sess %p  to tgt %p sess_count %d\n",
	    sess, sess->se_sess, vha->vha_tgt.qla_tgt,
	    vha->vha_tgt.qla_tgt->sess_count);
1337 1338

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
1339 1340 1341
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ?  "local " : "", fcport->port_name,
1342 1343
	    fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area,
	    sess->d_id.b.al_pa, sess->conf_compl_supported ?  "" : "not ");
1344 1345 1346 1347

	return sess;
}

1348 1349 1350 1351 1352 1353
/*
 * max_gen - specifies maximum session generation
 * at which this deletion requestion is still valid
 */
void
qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
1354
{
1355
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
1356
	struct fc_port *sess = fcport;
1357
	unsigned long flags;
1358 1359 1360 1361

	if (!vha->hw->tgt.tgt_ops)
		return;

1362
	if (!tgt)
1363 1364
		return;

1365
	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1366
	if (tgt->tgt_stop) {
1367
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1368 1369
		return;
	}
1370
	if (!sess->se_sess) {
1371
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1372 1373 1374
		return;
	}

1375
	if (max_gen - sess->generation < 0) {
1376
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1377 1378 1379 1380 1381 1382 1383 1384
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
		    "Ignoring stale deletion request for se_sess %p / sess %p"
		    " for port %8phC, req_gen %d, sess_gen %d\n",
		    sess->se_sess, sess, sess->port_name, max_gen,
		    sess->generation);
		return;
	}

1385 1386 1387 1388
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

	sess->local = 1;
	qlt_schedule_sess_for_deletion(sess, false);
1389
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400
}

static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;
	int res;
	/*
	 * We need to protect against race, when tgt is freed before or
	 * inside wake_up()
	 */
1401
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1402
	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
1403 1404
	    "tgt %p, sess_count=%d\n",
	    tgt, tgt->sess_count);
1405
	res = (tgt->sess_count == 0);
1406
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1407 1408 1409 1410 1411

	return res;
}

/* Called by tcm_qla2xxx configfs code */
1412
int qlt_stop_phase1(struct qla_tgt *tgt)
1413 1414 1415 1416 1417
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429
	mutex_lock(&qla_tgt_mutex);
	if (!vha->fc_vport) {
		struct Scsi_Host *sh = vha->host;
		struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
		bool npiv_vports;

		spin_lock_irqsave(sh->host_lock, flags);
		npiv_vports = (fc_host->npiv_vports_inuse);
		spin_unlock_irqrestore(sh->host_lock, flags);

		if (npiv_vports) {
			mutex_unlock(&qla_tgt_mutex);
1430 1431
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
			    "NPIV is in use. Can not stop target\n");
1432 1433 1434
			return -EPERM;
		}
	}
1435 1436 1437
	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
1438 1439
		mutex_unlock(&qla_tgt_mutex);
		return -EPERM;
1440 1441
	}

1442
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
1443 1444 1445 1446 1447
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
1448
	mutex_lock(&vha->vha_tgt.tgt_mutex);
1449
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1450
	tgt->tgt_stop = 1;
J
Joern Engel 已提交
1451
	qlt_clear_tgt_db(tgt);
1452
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1453
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
1454
	mutex_unlock(&qla_tgt_mutex);
1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
1467
	    "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);
1468 1469 1470 1471

	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));

	/* Big hammer */
1472 1473
	if (!ha->flags.host_shutting_down &&
	    (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
1474 1475 1476 1477
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
1478
	return 0;
1479 1480 1481 1482 1483 1484
}
EXPORT_SYMBOL(qlt_stop_phase1);

/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
1485
	scsi_qla_host_t *vha = tgt->vha;
1486 1487

	if (tgt->tgt_stopped) {
1488
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
1489 1490 1491 1492
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}
1493 1494 1495 1496 1497 1498
	if (!tgt->tgt_stop) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
		    "%s: phase1 stop is not completed\n", __func__);
		dump_stack();
		return;
	}
1499

1500
	mutex_lock(&vha->vha_tgt.tgt_mutex);
1501 1502
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
1503
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
1504

1505
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
1506 1507 1508 1509 1510
	    tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);

/* Called from qlt_remove_target() -> qla2x00_remove_one() */
1511
static void qlt_release(struct qla_tgt *tgt)
1512
{
1513
	scsi_qla_host_t *vha = tgt->vha;
1514

1515 1516 1517 1518
	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stop &&
	    !tgt->tgt_stopped)
		qlt_stop_phase1(tgt);

1519
	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
1520 1521
		qlt_stop_phase2(tgt);

1522
	vha->vha_tgt.qla_tgt = NULL;
1523

1524
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

1574 1575 1576
	if (!ha->flags.fw_started)
		return;

1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590
	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

1591 1592
	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;
1593 1594 1595 1596 1597 1598 1599

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

1600
	nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE;
1601 1602 1603
	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
1604
			cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
1605 1606 1607 1608
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
1609
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

1622 1623
	/* Memory Barrier */
	wmb();
1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646
	qla2x00_start_iocbs(vha, vha->req);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
	    ha, abts, status);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

1647
	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
1662
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
	} else {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

1699
	vha->vha_tgt.qla_tgt->abts_resp_expected++;
1700

1701 1702
	/* Memory Barrier */
	wmb();
1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719
	qla2x00_start_iocbs(vha, vha->req);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct abts_resp_from_24xx_fw *entry)
{
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

1720
	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	/*
	 * We've got on entrance firmware's response on by us generated
	 * ABTS response. So, in it ID fields are reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE |	CTIO_COMPLETION_HANDLE_MARK;
1737
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
1738 1739 1740 1741 1742
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
	ctio->exchange_addr = entry->exchange_addr_to_abort;
1743 1744
	ctio->u.status1.flags = cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
					    CTIO7_FLAGS_TERMINATE);
1745
	ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);
1746

1747 1748
	/* Memory Barrier */
	wmb();
1749 1750 1751 1752 1753 1754
	qla2x00_start_iocbs(vha, vha->req);

	qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
	    FCP_TMF_CMPL, true);
}

1755 1756 1757 1758
static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;
1759
	unsigned long flags;
1760

1761
	spin_lock_irqsave(&vha->cmd_list_lock, flags);
1762 1763 1764
	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
		if (tag == op->atio.u.isp24.exchange_addr) {
			op->aborted = true;
1765
			spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
1766 1767 1768 1769
			return 1;
		}
	}

1770 1771 1772
	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
		if (tag == op->atio.u.isp24.exchange_addr) {
			op->aborted = true;
1773
			spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
1774 1775 1776 1777
			return 1;
		}
	}

1778 1779
	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		if (tag == cmd->atio.u.isp24.exchange_addr) {
1780
			cmd->aborted = 1;
1781
			spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
1782 1783 1784
			return 1;
		}
	}
1785
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
1786 1787 1788 1789 1790 1791 1792 1793 1794 1795

	return 0;
}

/* drop cmds for the given lun
 * XXX only looks for cmds on the port through which lun reset was recieved
 * XXX does not go through the list of other port (which may have cmds
 *     for the same lun)
 */
static void abort_cmds_for_lun(struct scsi_qla_host *vha,
1796
			        u64 lun, uint8_t *s_id)
1797 1798 1799 1800
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;
	uint32_t key;
1801
	unsigned long flags;
1802 1803

	key = sid_to_key(s_id);
1804
	spin_lock_irqsave(&vha->cmd_list_lock, flags);
1805 1806
	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
		uint32_t op_key;
1807
		u64 op_lun;
1808 1809 1810 1811 1812 1813 1814

		op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
		op_lun = scsilun_to_int(
			(struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
		if (op_key == key && op_lun == lun)
			op->aborted = true;
	}
1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826

	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
		uint32_t op_key;
		u64 op_lun;

		op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
		op_lun = scsilun_to_int(
			(struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
		if (op_key == key && op_lun == lun)
			op->aborted = true;
	}

1827 1828
	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		uint32_t cmd_key;
1829
		u64 cmd_lun;
1830 1831 1832 1833 1834

		cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
		cmd_lun = scsilun_to_int(
			(struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
		if (cmd_key == key && cmd_lun == lun)
1835
			cmd->aborted = 1;
1836
	}
1837
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
1838 1839
}

1840 1841
/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1842
	struct abts_recv_from_24xx *abts, struct fc_port *sess)
1843 1844
{
	struct qla_hw_data *ha = vha->hw;
1845
	struct se_session *se_sess = sess->se_sess;
1846
	struct qla_tgt_mgmt_cmd *mcmd;
1847
	struct qla_tgt_cmd *cmd;
1848
	struct se_cmd *se_cmd;
1849
	int rc;
1850
	bool found_lun = false;
1851
	unsigned long flags;
1852

1853
	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
1854
	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
1855
		if (se_cmd->tag == abts->exchange_addr_to_abort) {
1856 1857 1858 1859
			found_lun = true;
			break;
		}
	}
1860
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
1861

1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874
	/* cmd not in LIO lists, look in qla list */
	if (!found_lun) {
		if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
			/* send TASK_ABORT response immediately */
			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_CMPL, false);
			return 0;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081,
			    "unable to find cmd in driver or LIO for tag 0x%x\n",
			    abts->exchange_addr_to_abort);
			return -ENOENT;
		}
	}
1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

1889
	cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
1890 1891
	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
1892
	mcmd->reset_count = vha->hw->chip_reset;
1893
	mcmd->tmr_func = QLA_TGT_ABTS;
1894

1895
	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, cmd->unpacked_lun, mcmd->tmr_func,
1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914
	    abts->exchange_addr_to_abort);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
		    "qla_target(%d):  tgt_ops->handle_tmr()"
		    " failed: %d", vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
1915
	struct fc_port *sess;
1916 1917 1918
	uint32_t tag = abts->exchange_addr_to_abort;
	uint8_t s_id[3];
	int rc;
1919
	unsigned long flags;
1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946

	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
	    abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	s_id[0] = abts->fcp_hdr_le.s_id[2];
	s_id[1] = abts->fcp_hdr_le.s_id[1];
	s_id[2] = abts->fcp_hdr_le.s_id[0];

1947
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1948 1949 1950 1951 1952
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existant session\n",
		    vha->vp_idx);
1953
		rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
1954
		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
1955 1956 1957

		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

1958 1959 1960 1961 1962 1963
		if (rc != 0) {
			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
			    false);
		}
		return;
	}
1964 1965
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

1966

1967
	if (sess->deleted) {
1968 1969 1970 1971
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989
	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
	struct ctio7_to_24xx *ctio;
1990
	uint16_t temp;
1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011

	ql_dbg(ql_dbg_tgt, ha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
	    ha, atio, resp_code);

	/* Send marker if required */
	if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", ha->vp_idx, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = mcmd->sess->loop_id;
2012
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2013 2014 2015 2016 2017
	ctio->vp_index = ha->vp_idx;
	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
2018 2019 2020
	temp = (atio->u.isp24.attr << 9)|
		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
	ctio->u.status1.flags = cpu_to_le16(temp);
2021 2022
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(temp);
2023
	ctio->u.status1.scsi_status =
2024 2025
	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
	ctio->u.status1.response_len = cpu_to_le16(8);
2026
	ctio->u.status1.sense_data[0] = resp_code;
2027

2028 2029
	/* Memory Barrier */
	wmb();
2030 2031 2032 2033 2034 2035 2036 2037 2038
	qla2x00_start_iocbs(ha, ha->req);
}

void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);

2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire
 */
void qlt_send_resp_ctio(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
    uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
{
	struct atio_from_isp *atio = &cmd->atio;
	struct ctio7_to_24xx *ctio;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
	    "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
	    "sense_key=%02x, asc=%02x, ascq=%02x",
	    vha, atio, scsi_status, sense_key, asc, ascq);

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (!ctio) {
		ql_dbg(ql_dbg_async, vha, 0x3067,
		    "qla2x00t(%ld): %s failed: unable to allocate request packet",
		    vha->host_no, __func__);
		goto out;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE;
	ctio->nport_handle = cmd->sess->loop_id;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
2073 2074 2075
	temp = (atio->u.isp24.attr << 9) |
	    CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
	ctio->u.status1.flags = cpu_to_le16(temp);
2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(temp);
	ctio->u.status1.scsi_status =
	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status);
	ctio->u.status1.response_len = cpu_to_le16(18);
	ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));

	if (ctio->u.status1.residual != 0)
		ctio->u.status1.scsi_status |=
		    cpu_to_le16(SS_RESIDUAL_UNDER);

	/* Response code and sense key */
	put_unaligned_le32(((0x70 << 24) | (sense_key << 8)),
	    (&ctio->u.status1.sense_data)[0]);
	/* Additional sense length */
	put_unaligned_le32(0x0a, (&ctio->u.status1.sense_data)[1]);
	/* ASC and ASCQ */
	put_unaligned_le32(((asc << 24) | (ascq << 16)),
	    (&ctio->u.status1.sense_data)[3]);

	/* Memory Barrier */
	wmb();

	qla2x00_start_iocbs(vha, vha->req);
out:
	return;
}

2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115
/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
2116

2117
	if (!vha->flags.online || mcmd->reset_count != ha->chip_reset) {
2118
		/*
2119
		 * Either the port is not online or this request was from
2120 2121 2122
		 * previous life, just abort the processing.
		 */
		ql_dbg(ql_dbg_async, vha, 0xe100,
2123 2124 2125
			"RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
			vha->flags.online, qla2x00_reset_active(vha),
			mcmd->reset_count, ha->chip_reset);
2126 2127 2128 2129 2130
		ha->tgt.tgt_ops->free_mcmd(mcmd);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	}

2131 2132
	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
		if (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
2133 2134 2135 2136 2137
		    ELS_LOGO ||
		    mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
		    ELS_PRLO ||
		    mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
		    ELS_TPRLO) {
2138
			ql_dbg(ql_dbg_disc, vha, 0x2106,
2139 2140 2141 2142 2143 2144 2145 2146 2147
			    "TM response logo %phC status %#x state %#x",
			    mcmd->sess->port_name, mcmd->fc_tm_rsp,
			    mcmd->flags);
			qlt_schedule_sess_for_deletion_lock(mcmd->sess);
		} else {
			qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
				0, 0, 0, 0, 0, 0);
		}
	} else {
2148
		if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182
			qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
			    mcmd->fc_tm_rsp, false);
		else
			qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1.  The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here..
	 */
	ha->tgt.tgt_ops->free_mcmd(mcmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);

/* No locks */
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218
	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
		/*
		 * If greater than four sg entries then we need to allocate
		 * the continuation entries
		 */
		if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
			prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
			prm->tgt->datasegs_per_cmd,
			prm->tgt->datasegs_per_cont);
	} else {
		/* DIF */
		if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
		    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
			prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
			prm->tot_dsds = prm->seg_cnt;
		} else
			prm->tot_dsds = prm->seg_cnt;

		if (cmd->prot_sg_cnt) {
			prm->prot_sg      = cmd->prot_sg;
			prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev,
				cmd->prot_sg, cmd->prot_sg_cnt,
				cmd->dma_data_direction);
			if (unlikely(prm->prot_seg_cnt == 0))
				goto out_err;

			if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
			    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
				/* Dif Bundling not support here */
				prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
								cmd->blk_sz);
				prm->tot_dsds += prm->prot_seg_cnt;
			} else
				prm->tot_dsds += prm->prot_seg_cnt;
		}
	}
2219 2220 2221 2222 2223 2224 2225 2226 2227 2228

	return 0;

out_err:
	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}

J
Joern Engel 已提交
2229
static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
2230 2231 2232
{
	struct qla_hw_data *ha = vha->hw;

J
Joern Engel 已提交
2233 2234 2235
	if (!cmd->sg_mapped)
		return;

2236 2237
	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
	cmd->sg_mapped = 0;
2238 2239 2240 2241 2242

	if (cmd->prot_sg_cnt)
		pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
			cmd->dma_data_direction);

2243 2244 2245
	if (!cmd->ctx)
		return;

2246
	if (cmd->ctx_dsd_alloced)
2247
		qla2x00_clean_dsd_pool(ha, cmd->ctx);
2248

2249
	dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
2250 2251 2252 2253 2254
}

static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
	uint32_t req_cnt)
{
2255
	uint32_t cnt, cnt_in;
2256 2257

	if (vha->req->cnt < (req_cnt + 2)) {
2258
		cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out);
2259
		cnt_in = (uint16_t)RD_REG_DWORD(vha->req->req_q_in);
2260 2261 2262 2263 2264 2265 2266

		if  (vha->req->ring_index < cnt)
			vha->req->cnt = cnt - vha->req->ring_index;
		else
			vha->req->cnt = vha->req->length -
			    (vha->req->ring_index - cnt);

2267 2268 2269 2270 2271 2272 2273 2274
		if (unlikely(vha->req->cnt < (req_cnt + 2))) {
			ql_dbg(ql_dbg_io, vha, 0x305a,
			    "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n",
			    vha->vp_idx, vha->req->ring_index,
			    vha->req->cnt, req_cnt, cnt, cnt_in,
			    vha->req->length);
			return -EAGAIN;
		}
2275
	}
2276

2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307
	vha->req->cnt -= req_cnt;

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
{
	/* Adjust ring index. */
	vha->req->ring_index++;
	if (vha->req->ring_index == vha->req->length) {
		vha->req->ring_index = 0;
		vha->req->ring_ptr = vha->req->ring;
	} else {
		vha->req->ring_ptr++;
	}
	return (cont_entry_t *)vha->req->ring_ptr;
}

/* ha->hardware_lock supposed to be held on entry */
static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t h;

	h = ha->tgt.current_handle;
	/* always increment cmd handle */
	do {
		++h;
2308
		if (h > DEFAULT_OUTSTANDING_COMMANDS)
2309 2310
			h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
		if (h == ha->tgt.current_handle) {
2311
			ql_dbg(ql_dbg_io, vha, 0x305b,
2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334
			    "qla_target(%d): Ran out of "
			    "empty cmd slots in ha %p\n", vha->vp_idx, ha);
			h = QLA_TGT_NULL_HANDLE;
			break;
		}
	} while ((h == QLA_TGT_NULL_HANDLE) ||
	    (h == QLA_TGT_SKIP_HANDLE) ||
	    (ha->tgt.cmds[h-1] != NULL));

	if (h != QLA_TGT_NULL_HANDLE)
		ha->tgt.current_handle = h;

	return h;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *atio = &prm->cmd->atio;
2335
	uint16_t temp;
2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353

	pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = vha->vp_idx;

	h = qlt_make_handle(vha);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
2354
		ha->tgt.cmds[h - 1] = prm->cmd;
2355 2356 2357

	pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = prm->cmd->loop_id;
2358
	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2359 2360 2361 2362
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
2363 2364
	temp = atio->u.isp24.attr << 9;
	pkt->u.status0.flags |= cpu_to_le16(temp);
2365 2366
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.ox_id = cpu_to_le16(temp);
2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;

	/* Build continuation packets */
	while (prm->seg_cnt > 0) {
		cont_a64_entry_t *cont_pkt64 =
			(cont_a64_entry_t *)qlt_get_req_pkt(vha);

		/*
		 * Make sure that from cont_pkt64 none of
		 * 64-bit specific fields used for 32-bit
		 * addressing. Cast to (cont_entry_t *) for
		 * that.
		 */

		memset(cont_pkt64, 0, sizeof(*cont_pkt64));

		cont_pkt64->entry_count = 1;
		cont_pkt64->sys_define = 0;

		if (enable_64bit_addressing) {
			cont_pkt64->entry_type = CONTINUE_A64_TYPE;
			dword_ptr =
			    (uint32_t *)&cont_pkt64->dseg_0_address;
		} else {
			cont_pkt64->entry_type = CONTINUE_TYPE;
			dword_ptr =
			    (uint32_t *)&((cont_entry_t *)
				cont_pkt64)->dseg_0_address;
		}

		/* Load continuation entry data segments */
		for (cnt = 0;
		    cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
		    cnt++, prm->seg_cnt--) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_lo32
				(sg_dma_address(prm->sg)));
			if (enable_64bit_addressing) {
				*dword_ptr++ =
				    cpu_to_le32(pci_dma_hi32
					(sg_dma_address
					(prm->sg)));
			}
			*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

			prm->sg = sg_next(prm->sg);
		}
	}
}

/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
static void qlt_load_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

	/* Setup packet address segment pointer */
	dword_ptr = pkt24->u.status0.dseg_0_address;

	/* Set total data segment count */
	if (prm->seg_cnt)
		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

	if (prm->seg_cnt == 0) {
		/* No data transfer */
		*dword_ptr++ = 0;
		*dword_ptr = 0;
		return;
	}

	/* If scatter gather */

	/* Load command entry data segments */
	for (cnt = 0;
	    (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
	    cnt++, prm->seg_cnt--) {
		*dword_ptr++ =
		    cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
		if (enable_64bit_addressing) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_hi32(
				sg_dma_address(prm->sg)));
		}
		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

		prm->sg = sg_next(prm->sg);
	}

	qlt_load_cont_data_segments(prm, vha);
}

static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
{
	return cmd->bufflen > 0;
}

2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496
static void qlt_print_dif_err(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd;
	struct scsi_qla_host *vha;

	/* asc 0x10=dif error */
	if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) {
		cmd = prm->cmd;
		vha = cmd->vha;
		/* ASCQ */
		switch (prm->sense_buffer[13]) {
		case 1:
2497
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b,
2498 2499 2500 2501 2502 2503
			    "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		case 2:
2504
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c,
2505 2506 2507 2508 2509 2510
			    "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		case 3:
2511
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f,
2512 2513 2514 2515 2516 2517
			    "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		default:
2518
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe010,
2519 2520 2521 2522 2523 2524
			    "BE detected Dif ERR: lba[%llx|%lld] len[%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		}
2525
		ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16);
2526 2527 2528
	}
}

2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563
/*
 * Called without ha->hardware_lock held
 */
static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
	uint32_t *full_req_cnt)
{
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd = &cmd->se_cmd;

	prm->cmd = cmd;
	prm->tgt = tgt;
	prm->rq_result = scsi_status;
	prm->sense_buffer = &cmd->sense_buffer[0];
	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
	prm->sg = NULL;
	prm->seg_cnt = -1;
	prm->req_cnt = 1;
	prm->add_status_pkt = 0;

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EFAULT;

	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
		if  (qlt_pci_map_calc_cnt(prm) != 0)
			return -EAGAIN;
	}

	*full_req_cnt = prm->req_cnt;

	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
2564
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c,
2565 2566 2567 2568
		    "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
		       prm->residual, se_cmd->tag,
		       se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		       cmd->bufflen, prm->rq_result);
2569 2570 2571
		prm->rq_result |= SS_RESIDUAL_UNDER;
	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
2572
		ql_dbg(ql_dbg_io, vha, 0x305d,
2573 2574 2575
		    "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
		       prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
		       se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614
		prm->rq_result |= SS_RESIDUAL_OVER;
	}

	if (xmit_type & QLA_TGT_XMIT_STATUS) {
		/*
		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
		 * ignored in *xmit_response() below
		 */
		if (qlt_has_data(cmd)) {
			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
			    (IS_FWI2_CAPABLE(ha) &&
			    (prm->rq_result != 0))) {
				prm->add_status_pkt = 1;
				(*full_req_cnt)++;
			}
		}
	}

	return 0;
}

static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
	struct qla_tgt_cmd *cmd, int sending_sense)
{
	if (ha->tgt.enable_class_2)
		return 0;

	if (sending_sense)
		return cmd->conf_compl_supported;
	else
		return ha->tgt.enable_explicit_conf &&
		    cmd->conf_compl_supported;
}

static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
	struct qla_tgt_prm *prm)
{
	prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
	    (uint32_t)sizeof(ctio->u.status1.sense_data));
2615
	ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
2616
	if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
2617
		ctio->u.status0.flags |= cpu_to_le16(
2618 2619 2620 2621 2622 2623 2624 2625 2626
		    CTIO7_FLAGS_EXPLICIT_CONFORM |
		    CTIO7_FLAGS_CONFORM_REQ);
	}
	ctio->u.status0.residual = cpu_to_le32(prm->residual);
	ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
	if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
		int i;

		if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
2627
			if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) {
2628 2629 2630 2631 2632 2633
				ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
				    "Skipping EXPLICIT_CONFORM and "
				    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
				    "non GOOD status\n");
				goto skip_explict_conf;
			}
2634
			ctio->u.status1.flags |= cpu_to_le16(
2635 2636 2637 2638 2639
			    CTIO7_FLAGS_EXPLICIT_CONFORM |
			    CTIO7_FLAGS_CONFORM_REQ);
		}
skip_explict_conf:
		ctio->u.status1.flags &=
2640
		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2641
		ctio->u.status1.flags |=
2642
		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2643
		ctio->u.status1.scsi_status |=
2644
		    cpu_to_le16(SS_SENSE_LEN_VALID);
2645 2646 2647 2648 2649
		ctio->u.status1.sense_length =
		    cpu_to_le16(prm->sense_buffer_len);
		for (i = 0; i < prm->sense_buffer_len/4; i++)
			((uint32_t *)ctio->u.status1.sense_data)[i] =
				cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
2650 2651 2652

		qlt_print_dif_err(prm);

2653 2654
	} else {
		ctio->u.status1.flags &=
2655
		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2656
		ctio->u.status1.flags |=
2657
		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2658 2659 2660 2661 2662 2663 2664 2665
		ctio->u.status1.sense_length = 0;
		memset(ctio->u.status1.sense_data, 0,
		    sizeof(ctio->u.status1.sense_data));
	}

	/* Sense with len > 24, is it possible ??? */
}

2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688
static inline int
qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		if (ql2xenablehba_err_chk >= 1)
			return 1;
		break;
	case TARGET_PROT_DOUT_PASS:
	case TARGET_PROT_DIN_PASS:
		if (ql2xenablehba_err_chk >= 2)
			return 1;
		break;
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		return 1;
	default:
		break;
	}
	return 0;
}

2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705
static inline int
qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_STRIP:
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
	    return 1;
	default:
	    return 0;
	}
	return 0;
}

2706
/*
2707
 * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
2708
 */
2709 2710 2711
static void
qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
    uint16_t *pfw_prot_opts)
2712
{
2713
	struct se_cmd *se_cmd = &cmd->se_cmd;
2714
	uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
2715 2716 2717
	scsi_qla_host_t *vha = cmd->tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	uint32_t t32 = 0;
2718

2719 2720
	/*
	 * wait till Mode Sense/Select cmd, modepage Ah, subpage 2
2721 2722 2723
	 * have been immplemented by TCM, before AppTag is avail.
	 * Look for modesense_handlers[]
	 */
2724
	ctx->app_tag = 0;
2725 2726 2727
	ctx->app_tag_mask[0] = 0x0;
	ctx->app_tag_mask[1] = 0x0;

2728 2729 2730 2731 2732 2733 2734 2735 2736 2737
	if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
			*pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
			*pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
	}

	t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);

2738 2739 2740
	switch (se_cmd->prot_type) {
	case TARGET_DIF_TYPE0_PROT:
		/*
2741 2742 2743
		 * No check for ql2xenablehba_err_chk, as it
		 * would be an I/O error if hba tag generation
		 * is not done.
2744 2745 2746 2747 2748 2749 2750 2751 2752
		 */
		ctx->ref_tag = cpu_to_le32(lba);
		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	case TARGET_DIF_TYPE1_PROT:
2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768
	    /*
	     * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
	     * REF tag, and 16 bit app tag.
	     */
	    ctx->ref_tag = cpu_to_le32(lba);
	    if (!qla_tgt_ref_mask_check(se_cmd) ||
		!(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
		    *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
		    break;
	    }
	    /* enable ALL bytes of the ref tag */
	    ctx->ref_tag_mask[0] = 0xff;
	    ctx->ref_tag_mask[1] = 0xff;
	    ctx->ref_tag_mask[2] = 0xff;
	    ctx->ref_tag_mask[3] = 0xff;
	    break;
2769
	case TARGET_DIF_TYPE2_PROT:
2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785
	    /*
	     * For TYPE 2 protection: 16 bit GUARD + 32 bit REF
	     * tag has to match LBA in CDB + N
	     */
	    ctx->ref_tag = cpu_to_le32(lba);
	    if (!qla_tgt_ref_mask_check(se_cmd) ||
		!(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
		    *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
		    break;
	    }
	    /* enable ALL bytes of the ref tag */
	    ctx->ref_tag_mask[0] = 0xff;
	    ctx->ref_tag_mask[1] = 0xff;
	    ctx->ref_tag_mask[2] = 0xff;
	    ctx->ref_tag_mask[3] = 0xff;
	    break;
2786
	case TARGET_DIF_TYPE3_PROT:
2787 2788 2789 2790 2791
	    /* For TYPE 3 protection: 16 bit GUARD only */
	    *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
	    ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
		ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
	    break;
2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812
	}
}

static inline int
qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
{
	uint32_t		*cur_dsd;
	uint32_t		transfer_length = 0;
	uint32_t		data_bytes;
	uint32_t		dif_bytes;
	uint8_t			bundling = 1;
	uint8_t			*clr_ptr;
	struct crc_context	*crc_ctx_pkt = NULL;
	struct qla_hw_data	*ha;
	struct ctio_crc2_to_fw	*pkt;
	dma_addr_t		crc_ctx_dma;
	uint16_t		fw_prot_opts = 0;
	struct qla_tgt_cmd	*cmd = prm->cmd;
	struct se_cmd		*se_cmd = &cmd->se_cmd;
	uint32_t h;
	struct atio_from_isp *atio = &prm->cmd->atio;
2813
	struct qla_tc_param	tc;
2814
	uint16_t t16;
2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838

	ha = vha->hw;

	pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	ql_dbg(ql_dbg_tgt, vha, 0xe071,
		"qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
		vha->vp_idx, __func__, se_cmd, se_cmd->prot_op,
		prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);

	if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
	    (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
		bundling = 0;

	/* Compute dif len and adjust data len to incude protection */
	data_bytes = cmd->bufflen;
	dif_bytes  = (data_bytes / cmd->blk_sz) * 8;

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		transfer_length = data_bytes;
2839 2840
		if (cmd->prot_sg_cnt)
			data_bytes += dif_bytes;
2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900
		break;
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		transfer_length = data_bytes + dif_bytes;
		break;
	default:
		BUG();
		break;
	}

	if (!qlt_hba_err_chk_enabled(se_cmd))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
			fw_prot_opts |= PO_DIS_VALD_APP_ESC;
		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
			fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
	}

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_INSERT:
		fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_STRIP:
		fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		fw_prot_opts |= PO_MODE_DIF_PASS;
		/* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
		break;
	default:/* Normal Request */
		fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	/* ---- PKT ---- */
	/* Update entry type to indicate Command Type CRC_2 IOCB */
	pkt->entry_type  = CTIO_CRC2;
	pkt->entry_count = 1;
	pkt->vp_index = vha->vp_idx;

	h = qlt_make_handle(vha);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		ha->tgt.cmds[h-1] = prm->cmd;

	pkt->handle  = h | CTIO_COMPLETION_HANDLE_MARK;
2901
	pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
2902
	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2903 2904 2905 2906
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr   = atio->u.isp24.exchange_addr;
2907 2908 2909 2910 2911 2912 2913

	/* silence compile warning */
	t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->ox_id  = cpu_to_le16(t16);

	t16 = (atio->u.isp24.attr << 9);
	pkt->flags |= cpu_to_le16(t16);
2914 2915 2916 2917
	pkt->relative_offset = cpu_to_le32(prm->cmd->offset);

	/* Set transfer direction */
	if (cmd->dma_data_direction == DMA_TO_DEVICE)
2918
		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
2919
	else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
2920
		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944

	pkt->dseg_count = prm->tot_dsds;
	/* Fibre channel byte count */
	pkt->transfer_length = cpu_to_le32(transfer_length);

	/* ----- CRC context -------- */

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = cmd->ctx =
	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	/* Set handle */
	crc_ctx_pkt->handle = pkt->handle;

2945
	qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);
2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968

	pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interlaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count =
			cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size   = cpu_to_le16(cmd->blk_sz);
	crc_ctx_pkt->prot_opts  = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
2969
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
2970

2971 2972 2973 2974 2975 2976 2977 2978
	memset((uint8_t *)&tc, 0 , sizeof(tc));
	tc.vha = vha;
	tc.blk_sz = cmd->blk_sz;
	tc.bufflen = cmd->bufflen;
	tc.sg = cmd->sg;
	tc.prot_sg = cmd->prot_sg;
	tc.ctx = crc_ctx_pkt;
	tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;
2979 2980

	/* Walks data segments */
2981
	pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
2982 2983 2984

	if (!bundling && prm->prot_seg_cnt) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
2985
			prm->tot_dsds, &tc))
2986 2987
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
2988
		(prm->tot_dsds - prm->prot_seg_cnt), &tc))
2989 2990 2991 2992
		goto crc_queuing_error;

	if (bundling && prm->prot_seg_cnt) {
		/* Walks dif segments */
2993
		pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;
2994 2995 2996

		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
2997
			prm->prot_seg_cnt, &tc))
2998 2999 3000 3001 3002 3003
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */
3004
	vha->hw->tgt.cmds[h - 1] = NULL;
3005 3006 3007 3008

	return QLA_FUNCTION_FAILED;
}

3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023
/*
 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
 */
int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
	uint8_t scsi_status)
{
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct ctio7_to_24xx *pkt;
	struct qla_tgt_prm prm;
	uint32_t full_req_cnt = 0;
	unsigned long flags = 0;
	int res;

3024
	spin_lock_irqsave(&ha->hardware_lock, flags);
3025
	if (cmd->sess && cmd->sess->deleted) {
3026 3027 3028 3029 3030
		cmd->state = QLA_TGT_STATE_PROCESSED;
		if (cmd->sess->logout_completed)
			/* no need to terminate. FW already freed exchange. */
			qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
		else
3031
			qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
3032 3033 3034 3035 3036
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

3037 3038 3039
	memset(&prm, 0, sizeof(prm));

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
3040 3041 3042 3043
	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n",
	    (xmit_type & QLA_TGT_XMIT_STATUS) ?
	    1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
	    &cmd->se_cmd);
3044 3045 3046 3047 3048 3049 3050 3051 3052

	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
	    &full_req_cnt);
	if (unlikely(res != 0)) {
		return res;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

3053 3054 3055 3056 3057
	if (xmit_type == QLA_TGT_XMIT_STATUS)
		vha->tgt_counters.core_qla_snd_status++;
	else
		vha->tgt_counters.core_qla_que_buf++;

3058
	if (!ha->flags.fw_started || cmd->reset_count != ha->chip_reset) {
3059
		/*
3060
		 * Either the port is not online or this request was from
3061 3062 3063 3064 3065
		 * previous life, just abort the processing.
		 */
		cmd->state = QLA_TGT_STATE_PROCESSED;
		qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
		ql_dbg(ql_dbg_async, vha, 0xe101,
3066 3067 3068
			"RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
			vha->flags.online, qla2x00_reset_active(vha),
			cmd->reset_count, ha->chip_reset);
3069 3070 3071 3072
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return 0;
	}

3073 3074 3075 3076 3077
	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(vha, full_req_cnt);
	if (unlikely(res))
		goto out_unmap_unlock;

3078 3079 3080 3081
	if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
		res = qlt_build_ctio_crc2_pkt(&prm, vha);
	else
		res = qlt_24xx_build_ctio_pkt(&prm, vha);
3082 3083
	if (unlikely(res != 0)) {
		vha->req->cnt += full_req_cnt;
3084
		goto out_unmap_unlock;
3085
	}
3086 3087 3088 3089 3090

	pkt = (struct ctio7_to_24xx *)prm.pkt;

	if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
		pkt->u.status0.flags |=
3091
		    cpu_to_le16(CTIO7_FLAGS_DATA_IN |
3092 3093
			CTIO7_FLAGS_STATUS_MODE_0);

3094 3095
		if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
			qlt_load_data_segments(&prm, vha);
3096 3097 3098 3099 3100 3101 3102

		if (prm.add_status_pkt == 0) {
			if (xmit_type & QLA_TGT_XMIT_STATUS) {
				pkt->u.status0.scsi_status =
				    cpu_to_le16(prm.rq_result);
				pkt->u.status0.residual =
				    cpu_to_le32(prm.residual);
3103
				pkt->u.status0.flags |= cpu_to_le16(
3104 3105 3106
				    CTIO7_FLAGS_SEND_STATUS);
				if (qlt_need_explicit_conf(ha, cmd, 0)) {
					pkt->u.status0.flags |=
3107
					    cpu_to_le16(
3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121
						CTIO7_FLAGS_EXPLICIT_CONFORM |
						CTIO7_FLAGS_CONFORM_REQ);
				}
			}

		} else {
			/*
			 * We have already made sure that there is sufficient
			 * amount of request entries to not drop HW lock in
			 * req_pkt().
			 */
			struct ctio7_to_24xx *ctio =
				(struct ctio7_to_24xx *)qlt_get_req_pkt(vha);

3122 3123 3124
			ql_dbg(ql_dbg_io, vha, 0x305e,
			    "Building additional status packet 0x%p.\n",
			    ctio);
3125

3126 3127 3128 3129
			/*
			 * T10Dif: ctio_crc2_to_fw overlay ontop of
			 * ctio7_to_24xx
			 */
3130
			memcpy(ctio, pkt, sizeof(*ctio));
3131
			/* reset back to CTIO7 */
3132
			ctio->entry_count = 1;
3133
			ctio->entry_type = CTIO_TYPE7;
3134
			ctio->dseg_count = 0;
3135
			ctio->u.status1.flags &= ~cpu_to_le16(
3136 3137 3138 3139
			    CTIO7_FLAGS_DATA_IN);

			/* Real finish is ctio_m1's finish */
			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
3140
			pkt->u.status0.flags |= cpu_to_le16(
3141
			    CTIO7_FLAGS_DONT_RET_CTIO);
3142 3143 3144 3145 3146

			/* qlt_24xx_init_ctio_to_isp will correct
			 * all neccessary fields that's part of CTIO7.
			 * There should be no residual of CTIO-CRC2 data.
			 */
3147 3148 3149 3150 3151 3152 3153 3154 3155
			qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
			    &prm);
			pr_debug("Status CTIO7: %p\n", ctio);
		}
	} else
		qlt_24xx_init_ctio_to_isp(pkt, &prm);


	cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
3156
	cmd->cmd_sent_to_fw = 1;
3157

3158 3159
	/* Memory Barrier */
	wmb();
3160 3161 3162 3163 3164 3165
	qla2x00_start_iocbs(vha, vha->req);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return 0;

out_unmap_unlock:
J
Joern Engel 已提交
3166
	qlt_unmap_sg(vha, cmd);
3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
EXPORT_SYMBOL(qlt_xmit_response);

int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
{
	struct ctio7_to_24xx *pkt;
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = cmd->tgt;
	struct qla_tgt_prm prm;
	unsigned long flags;
	int res = 0;

	memset(&prm, 0, sizeof(prm));
	prm.cmd = cmd;
	prm.tgt = tgt;
	prm.sg = NULL;
	prm.req_cnt = 1;

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EIO;

	/* Calculate number of entries and segments required */
	if (qlt_pci_map_calc_cnt(&prm) != 0)
		return -EAGAIN;

	spin_lock_irqsave(&ha->hardware_lock, flags);

3199
	if (!ha->flags.fw_started || (cmd->reset_count != ha->chip_reset) ||
3200
	    (cmd->sess && cmd->sess->deleted)) {
3201
		/*
3202
		 * Either the port is not online or this request was from
3203 3204 3205 3206 3207
		 * previous life, just abort the processing.
		 */
		cmd->state = QLA_TGT_STATE_NEED_DATA;
		qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
		ql_dbg(ql_dbg_async, vha, 0xe102,
3208 3209 3210
			"RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
			vha->flags.online, qla2x00_reset_active(vha),
			cmd->reset_count, ha->chip_reset);
3211 3212 3213 3214
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return 0;
	}

3215 3216 3217 3218
	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(vha, prm.req_cnt);
	if (res != 0)
		goto out_unlock_free_unmap;
3219 3220 3221 3222
	if (cmd->se_cmd.prot_op)
		res = qlt_build_ctio_crc2_pkt(&prm, vha);
	else
		res = qlt_24xx_build_ctio_pkt(&prm, vha);
3223

3224 3225
	if (unlikely(res != 0)) {
		vha->req->cnt += prm.req_cnt;
3226
		goto out_unlock_free_unmap;
3227 3228
	}

3229
	pkt = (struct ctio7_to_24xx *)prm.pkt;
3230
	pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
3231
	    CTIO7_FLAGS_STATUS_MODE_0);
3232 3233 3234

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
		qlt_load_data_segments(&prm, vha);
3235 3236

	cmd->state = QLA_TGT_STATE_NEED_DATA;
3237
	cmd->cmd_sent_to_fw = 1;
3238

3239 3240
	/* Memory Barrier */
	wmb();
3241 3242 3243 3244 3245 3246
	qla2x00_start_iocbs(vha, vha->req);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;

out_unlock_free_unmap:
J
Joern Engel 已提交
3247
	qlt_unmap_sg(vha, cmd);
3248 3249 3250 3251 3252 3253
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
EXPORT_SYMBOL(qlt_rdy_to_xfer);

3254 3255

/*
3256
 * it is assumed either hardware_lock or qpair lock is held.
3257
 */
3258
static void
3259
qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
3260
	struct ctio_crc_from_fw *sts)
3261 3262 3263 3264
{
	uint8_t		*ap = &sts->actual_dif[0];
	uint8_t		*ep = &sts->expected_dif[0];
	uint64_t	lba = cmd->se_cmd.t_task_lba;
3265 3266
	uint8_t scsi_status, sense_key, asc, ascq;
	unsigned long flags;
3267

3268
	cmd->trc_flags |= TRC_DIF_ERR;
3269

3270 3271 3272
	cmd->a_guard   = be16_to_cpu(*(uint16_t *)(ap + 0));
	cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
	cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
3273

3274 3275 3276
	cmd->e_guard   = be16_to_cpu(*(uint16_t *)(ep + 0));
	cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
	cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
3277

3278 3279
	ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
	    "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);
3280

3281
	scsi_status = sense_key = asc = ascq = 0;
3282

3283 3284
	/* check appl tag */
	if (cmd->e_app_tag != cmd->a_app_tag) {
3285 3286 3287 3288 3289 3290
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d,
		    "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
		    cmd->atio.u.isp24.fcp_hdr.ox_id);
3291 3292 3293 3294 3295 3296

		cmd->dif_err_code = DIF_ERR_APP;
		scsi_status = SAM_STAT_CHECK_CONDITION;
		sense_key = ABORTED_COMMAND;
		asc = 0x10;
		ascq = 0x2;
3297 3298 3299
	}

	/* check ref tag */
3300
	if (cmd->e_ref_tag != cmd->a_ref_tag) {
3301 3302 3303 3304 3305 3306
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e,
		    "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
		    cmd->atio.u.isp24.fcp_hdr.ox_id);
3307 3308 3309 3310 3311 3312

		cmd->dif_err_code = DIF_ERR_REF;
		scsi_status = SAM_STAT_CHECK_CONDITION;
		sense_key = ABORTED_COMMAND;
		asc = 0x10;
		ascq = 0x3;
3313 3314 3315
		goto out;
	}

3316 3317
	/* check guard */
	if (cmd->e_guard != cmd->a_guard) {
3318 3319 3320 3321 3322 3323 3324
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe012,
		    "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
		    cmd->atio.u.isp24.fcp_hdr.ox_id);

3325 3326 3327 3328 3329
		cmd->dif_err_code = DIF_ERR_GRD;
		scsi_status = SAM_STAT_CHECK_CONDITION;
		sense_key = ABORTED_COMMAND;
		asc = 0x10;
		ascq = 0x1;
3330 3331
	}
out:
3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345
	switch (cmd->state) {
	case QLA_TGT_STATE_NEED_DATA:
		/* handle_data will load DIF error code  */
		cmd->state = QLA_TGT_STATE_DATA_IN;
		vha->hw->tgt.tgt_ops->handle_data(cmd);
		break;
	default:
		spin_lock_irqsave(&cmd->cmd_lock, flags);
		if (cmd->aborted) {
			spin_unlock_irqrestore(&cmd->cmd_lock, flags);
			vha->hw->tgt.tgt_ops->free_cmd(cmd);
			break;
		}
		spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3346

3347 3348 3349 3350 3351 3352 3353 3354
		qlt_send_resp_ctio(vha, cmd, scsi_status, sense_key, asc, ascq);
		/* assume scsi status gets out on the wire.
		 * Will not wait for completion.
		 */
		vha->hw->tgt.tgt_ops->free_cmd(cmd);
		break;
	}
}
3355

3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368
/* If hardware_lock held on entry, might drop it, then reaquire */
/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy)
{
	struct nack_to_isp *nack;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;

	ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
	    "Sending TERM ELS CTIO (ha=%p)\n", ha);

3369
	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
3370 3371 3372 3373 3374 3375 3376 3377 3378
	if (pkt == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe080,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;
3379
	pkt->handle = QLA_TGT_SKIP_HANDLE;
3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			__constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}

	/* terminate */
	nack->u.isp24.flags |=
		__constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);

	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	qla2x00_start_iocbs(vha, vha->req);
	return ret;
}

static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked)
{
	unsigned long flags = 0;
	int rc;

	if (qlt_issue_marker(vha, ha_locked) < 0)
		return;

	if (ha_locked) {
		rc = __qlt_send_term_imm_notif(vha, imm);

#if 0	/* Todo  */
		if (rc == -ENOMEM)
			qlt_alloc_qfull_cmd(vha, imm, 0, 0);
3422 3423 3424
#else
		if (rc) {
		}
3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441
#endif
		goto done;
	}

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	rc = __qlt_send_term_imm_notif(vha, imm);

#if 0	/* Todo */
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, imm, 0, 0);
#endif

done:
	if (!ha_locked)
		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}

3442 3443 3444 3445
/*
 * If hardware_lock held on entry, might drop it, then reaquire
 * This function sends the appropriate CTIO to ISP 2xxx or 24xx
 */
3446 3447 3448 3449 3450 3451 3452 3453
static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd,
	struct atio_from_isp *atio)
{
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;
3454
	uint16_t temp;
3455

3456
	ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
3457

3458
	pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475
	if (pkt == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe050,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	if (cmd != NULL) {
		if (cmd->state < QLA_TGT_STATE_PROCESSED) {
			ql_dbg(ql_dbg_tgt, vha, 0xe051,
			    "qla_target(%d): Terminating cmd %p with "
			    "incorrect state %d\n", vha->vp_idx, cmd,
			    cmd->state);
		} else
			ret = 1;
	}

3476
	vha->tgt_counters.num_term_xchg_sent++;
3477 3478 3479 3480 3481
	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
3482
	ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
3483
	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
3484 3485 3486 3487 3488
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
3489 3490 3491
	temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE;
	ctio24->u.status1.flags = cpu_to_le16(temp);
3492 3493
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio24->u.status1.ox_id = cpu_to_le16(temp);
3494 3495 3496 3497 3498 3499 3500 3501

	/* Most likely, it isn't needed */
	ctio24->u.status1.residual = get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]);
	if (ctio24->u.status1.residual != 0)
		ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;

3502 3503
	/* Memory Barrier */
	wmb();
3504 3505 3506 3507 3508
	qla2x00_start_iocbs(vha, vha->req);
	return ret;
}

static void qlt_send_term_exchange(struct scsi_qla_host *vha,
3509 3510
	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
	int ul_abort)
3511
{
3512
	unsigned long flags = 0;
3513 3514 3515 3516 3517 3518 3519
	int rc;

	if (qlt_issue_marker(vha, ha_locked) < 0)
		return;

	if (ha_locked) {
		rc = __qlt_send_term_exchange(vha, cmd, atio);
3520 3521
		if (rc == -ENOMEM)
			qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3522 3523 3524 3525
		goto done;
	}
	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	rc = __qlt_send_term_exchange(vha, cmd, atio);
3526 3527
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3528

3529
done:
3530
	if (cmd && !ul_abort && !cmd->aborted) {
3531 3532
		if (cmd->sg_mapped)
			qlt_unmap_sg(vha, cmd);
3533 3534
		vha->hw->tgt.tgt_ops->free_cmd(cmd);
	}
3535 3536 3537 3538

	if (!ha_locked)
		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

3539
	return;
3540 3541
}

3542 3543 3544 3545 3546 3547
static void qlt_init_term_exchange(struct scsi_qla_host *vha)
{
	struct list_head free_list;
	struct qla_tgt_cmd *cmd, *tcmd;

	vha->hw->tgt.leak_exchg_thresh_hold =
3548
	    (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576 3577

	cmd = tcmd = NULL;
	if (!list_empty(&vha->hw->tgt.q_full_list)) {
		INIT_LIST_HEAD(&free_list);
		list_splice_init(&vha->hw->tgt.q_full_list, &free_list);

		list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
			list_del(&cmd->cmd_list);
			/* This cmd was never sent to TCM.  There is no need
			 * to schedule free or call free_cmd
			 */
			qlt_free_cmd(cmd);
			vha->hw->tgt.num_qfull_cmds_alloc--;
		}
	}
	vha->hw->tgt.num_qfull_cmds_dropped = 0;
}

static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
{
	uint32_t total_leaked;

	total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;

	if (vha->hw->tgt.leak_exchg_thresh_hold &&
	    (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {

		ql_dbg(ql_dbg_tgt, vha, 0xe079,
		    "Chip reset due to exchange starvation: %d/%d.\n",
3578
		    total_leaked, vha->hw->cur_fw_xcb_count);
3579 3580 3581 3582 3583 3584 3585 3586 3587 3588

		if (IS_P3P_TYPE(vha->hw))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
	}

}

3589
int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
3590 3591 3592 3593
{
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct se_cmd *se_cmd = &cmd->se_cmd;
3594
	unsigned long flags;
3595 3596 3597 3598 3599 3600

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
	    "qla_target(%d): terminating exchange for aborted cmd=%p "
	    "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
	    se_cmd->tag);

3601 3602 3603 3604 3605 3606 3607 3608
	spin_lock_irqsave(&cmd->cmd_lock, flags);
	if (cmd->aborted) {
		spin_unlock_irqrestore(&cmd->cmd_lock, flags);
		/*
		 * It's normal to see 2 calls in this path:
		 *  1) XFER Rdy completion + CMD_T_ABORT
		 *  2) TCM TMR - drain_state_list
		 */
3609 3610 3611 3612
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016,
		    "multiple abort. %p transport_state %x, t_state %x, "
		    "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
		    cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
3613 3614
		return EIO;
	}
3615
	cmd->aborted = 1;
3616
	cmd->trc_flags |= TRC_ABORT;
3617
	spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3618

3619 3620
	qlt_send_term_exchange(vha, cmd, &cmd->atio, 0, 1);
	return 0;
3621 3622 3623
}
EXPORT_SYMBOL(qlt_abort_cmd);

3624 3625
void qlt_free_cmd(struct qla_tgt_cmd *cmd)
{
3626
	struct fc_port *sess = cmd->sess;
3627

3628 3629 3630 3631
	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
	    "%s: se_cmd[%p] ox_id %04x\n",
	    __func__, &cmd->se_cmd,
	    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
3632

3633 3634
	BUG_ON(cmd->cmd_in_wq);

3635 3636 3637
	if (cmd->sg_mapped)
		qlt_unmap_sg(cmd->vha, cmd);

3638 3639 3640
	if (!cmd->q_full)
		qlt_decr_num_pend_cmds(cmd->vha);

3641
	BUG_ON(cmd->sg_mapped);
3642
	cmd->jiffies_at_free = get_jiffies_64();
3643 3644
	if (unlikely(cmd->free_sg))
		kfree(cmd->sg);
3645 3646 3647 3648 3649

	if (!sess || !sess->se_sess) {
		WARN_ON(1);
		return;
	}
3650
	cmd->jiffies_at_free = get_jiffies_64();
3651
	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662
}
EXPORT_SYMBOL(qlt_free_cmd);

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
	struct qla_tgt_cmd *cmd, uint32_t status)
{
	int term = 0;

3663
	if (cmd->se_cmd.prot_op)
3664
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe013,
3665 3666 3667 3668 3669 3670 3671 3672
		    "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
		    "se_cmd=%p tag[%x] op %#x/%s",
		     cmd->lba, cmd->lba,
		     cmd->num_blks, &cmd->se_cmd,
		     cmd->atio.u.isp24.exchange_addr,
		     cmd->se_cmd.prot_op,
		     prot_op_str(cmd->se_cmd.prot_op));

3673 3674 3675
	if (ctio != NULL) {
		struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
		term = !(c->flags &
3676
		    cpu_to_le16(OF_TERM_EXCH));
3677 3678 3679 3680
	} else
		term = 1;

	if (term)
3681
		qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711

	return term;
}

/* ha->hardware_lock supposed to be held on entry */
static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
	uint32_t handle)
{
	struct qla_hw_data *ha = vha->hw;

	handle--;
	if (ha->tgt.cmds[handle] != NULL) {
		struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
		ha->tgt.cmds[handle] = NULL;
		return cmd;
	} else
		return NULL;
}

/* ha->hardware_lock supposed to be held on entry */
static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
	uint32_t handle, void *ctio)
{
	struct qla_tgt_cmd *cmd = NULL;

	/* Clear out internal marks */
	handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
	    CTIO_INTERMEDIATE_HANDLE_MARK);

	if (handle != QLA_TGT_NULL_HANDLE) {
3712
		if (unlikely(handle == QLA_TGT_SKIP_HANDLE))
3713
			return NULL;
3714

3715
		/* handle-1 is actually used */
3716
		if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740
			ql_dbg(ql_dbg_tgt, vha, 0xe052,
			    "qla_target(%d): Wrong handle %x received\n",
			    vha->vp_idx, handle);
			return NULL;
		}
		cmd = qlt_get_cmd(vha, handle);
		if (unlikely(cmd == NULL)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe053,
			    "qla_target(%d): Suspicious: unable to "
			    "find the command with handle %x\n", vha->vp_idx,
			    handle);
			return NULL;
		}
	} else if (ctio != NULL) {
		/* We can't get loop ID from CTIO7 */
		ql_dbg(ql_dbg_tgt, vha, 0xe054,
		    "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
		    "support NULL handles\n", vha->vp_idx);
		return NULL;
	}

	return cmd;
}

3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 3772
/* hardware_lock should be held by caller. */
static void
qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle;

	if (cmd->sg_mapped)
		qlt_unmap_sg(vha, cmd);

	handle = qlt_make_handle(vha);

	/* TODO: fix debug message type and ids. */
	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
		ql_dbg(ql_dbg_io, vha, 0xff00,
		    "HOST-ABORT: handle=%d, state=PROCESSED.\n", handle);
	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		cmd->write_data_transferred = 0;
		cmd->state = QLA_TGT_STATE_DATA_IN;

		ql_dbg(ql_dbg_io, vha, 0xff01,
		    "HOST-ABORT: handle=%d, state=DATA_IN.\n", handle);

		ha->tgt.tgt_ops->handle_data(cmd);
		return;
	} else {
		ql_dbg(ql_dbg_io, vha, 0xff03,
		    "HOST-ABORT: handle=%d, state=BAD(%d).\n", handle,
		    cmd->state);
		dump_stack();
	}

3773
	cmd->trc_flags |= TRC_FLUSH;
3774 3775 3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799 3800 3801 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812
	ha->tgt.tgt_ops->free_cmd(cmd);
}

void
qlt_host_reset_handler(struct qla_hw_data *ha)
{
	struct qla_tgt_cmd *cmd;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	scsi_qla_host_t *vha = NULL;
	struct qla_tgt *tgt = base_vha->vha_tgt.qla_tgt;
	uint32_t i;

	if (!base_vha->hw->tgt.tgt_ops)
		return;

	if (!tgt || qla_ini_mode_enabled(base_vha)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
			"Target mode disabled\n");
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xff10,
	    "HOST-ABORT-HNDLR: base_vha->dpc_flags=%lx.\n",
	    base_vha->dpc_flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (i = 1; i < DEFAULT_OUTSTANDING_COMMANDS + 1; i++) {
		cmd = qlt_get_cmd(base_vha, i);
		if (!cmd)
			continue;
		/* ha->tgt.cmds entry is cleared by qlt_get_cmd. */
		vha = cmd->vha;
		qlt_abort_cmd_on_host_reset(vha, cmd);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}


3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
	uint32_t status, void *ctio)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd;
	struct qla_tgt_cmd *cmd;

	if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
		/* That could happen only in case of an error/reset/abort */
		if (status != CTIO_SUCCESS) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
			    "Intermediate CTIO received"
			    " (status %x)\n", status);
		}
		return;
	}

	cmd = qlt_ctio_to_cmd(vha, handle, ctio);
3834
	if (cmd == NULL)
3835
		return;
3836

3837
	se_cmd = &cmd->se_cmd;
3838
	cmd->cmd_sent_to_fw = 0;
3839

J
Joern Engel 已提交
3840
	qlt_unmap_sg(vha, cmd);
3841 3842 3843 3844 3845 3846

	if (unlikely(status != CTIO_SUCCESS)) {
		switch (status & 0xFFFF) {
		case CTIO_LIP_RESET:
		case CTIO_TARGET_RESET:
		case CTIO_ABORTED:
3847
			/* driver request abort via Terminate exchange */
3848 3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859 3860
		case CTIO_TIMEOUT:
		case CTIO_INVALID_RX_ID:
			/* They are OK */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
			    "qla_target(%d): CTIO with "
			    "status %#x received, state %x, se_cmd %p, "
			    "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
			    "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
			    status, cmd->state, se_cmd);
			break;

		case CTIO_PORT_LOGGED_OUT:
		case CTIO_PORT_UNAVAILABLE:
3861
		{
3862 3863 3864
			int logged_out =
				(status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;

3865
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
3866
			    "qla_target(%d): CTIO with %s status %x "
3867
			    "received (state %x, se_cmd %p)\n", vha->vp_idx,
3868
			    logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
3869 3870
			    status, cmd->state, se_cmd);

3871 3872 3873 3874 3875 3876 3877
			if (logged_out && cmd->sess) {
				/*
				 * Session is already logged out, but we need
				 * to notify initiator, who's not aware of this
				 */
				cmd->sess->logout_on_delete = 0;
				cmd->sess->send_els_logo = 1;
3878
				ql_dbg(ql_dbg_disc, vha, 0x20f8,
3879 3880 3881 3882
				    "%s %d %8phC post del sess\n",
				    __func__, __LINE__, cmd->sess->port_name);

				qlt_schedule_sess_for_deletion_lock(cmd->sess);
3883 3884 3885
			}
			break;
		}
3886 3887 3888 3889
		case CTIO_DIF_ERROR: {
			struct ctio_crc_from_fw *crc =
				(struct ctio_crc_from_fw *)ctio;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
3890 3891 3892
			    "qla_target(%d): CTIO with DIF_ERROR status %x "
			    "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
			    "expect_dif[0x%llx]\n",
3893 3894 3895 3896
			    vha->vp_idx, status, cmd->state, se_cmd,
			    *((u64 *)&crc->actual_dif[0]),
			    *((u64 *)&crc->expected_dif[0]));

3897 3898
			qlt_handle_dif_error(vha, cmd, ctio);
			return;
3899
		}
3900 3901
		default:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
3902
			    "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
3903 3904 3905 3906
			    vha->vp_idx, status, cmd->state, se_cmd);
			break;
		}

3907

3908
		/* "cmd->aborted" means
3909 3910 3911 3912 3913 3914
		 * cmd is already aborted/terminated, we don't
		 * need to terminate again.  The exchange is already
		 * cleaned up/freed at FW level.  Just cleanup at driver
		 * level.
		 */
		if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
3915
		    (!cmd->aborted)) {
3916
			cmd->trc_flags |= TRC_CTIO_ERR;
3917 3918
			if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
				return;
3919
		}
3920 3921 3922
	}

	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
3923
		cmd->trc_flags |= TRC_CTIO_DONE;
3924 3925 3926
	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		cmd->state = QLA_TGT_STATE_DATA_IN;

3927
		if (status == CTIO_SUCCESS)
3928 3929 3930 3931
			cmd->write_data_transferred = 1;

		ha->tgt.tgt_ops->handle_data(cmd);
		return;
3932
	} else if (cmd->aborted) {
3933
		cmd->trc_flags |= TRC_CTIO_ABORTED;
3934
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
3935
		  "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
3936
	} else {
3937
		cmd->trc_flags |= TRC_CTIO_STRANGE;
3938 3939 3940 3941 3942
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
		    "qla_target(%d): A command in state (%d) should "
		    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
	}

3943
	if (unlikely(status != CTIO_SUCCESS) &&
3944
		!cmd->aborted) {
3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
		dump_stack();
	}

	ha->tgt.tgt_ops->free_cmd(cmd);
}

static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
	uint8_t task_codes)
{
	int fcp_task_attr;

	switch (task_codes) {
	case ATIO_SIMPLE_QUEUE:
C
Christoph Hellwig 已提交
3959
		fcp_task_attr = TCM_SIMPLE_TAG;
3960 3961
		break;
	case ATIO_HEAD_OF_QUEUE:
C
Christoph Hellwig 已提交
3962
		fcp_task_attr = TCM_HEAD_TAG;
3963 3964
		break;
	case ATIO_ORDERED_QUEUE:
C
Christoph Hellwig 已提交
3965
		fcp_task_attr = TCM_ORDERED_TAG;
3966 3967
		break;
	case ATIO_ACA_QUEUE:
C
Christoph Hellwig 已提交
3968
		fcp_task_attr = TCM_ACA_TAG;
3969 3970
		break;
	case ATIO_UNTAGGED:
C
Christoph Hellwig 已提交
3971
		fcp_task_attr = TCM_SIMPLE_TAG;
3972 3973 3974 3975 3976
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
		    "qla_target: unknown task code %x, use ORDERED instead\n",
		    task_codes);
C
Christoph Hellwig 已提交
3977
		fcp_task_attr = TCM_ORDERED_TAG;
3978 3979 3980 3981 3982 3983
		break;
	}

	return fcp_task_attr;
}

3984
static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *,
3985 3986 3987 3988
					uint8_t *);
/*
 * Process context for I/O path into tcm_qla2xxx code
 */
3989
static void __qlt_do_work(struct qla_tgt_cmd *cmd)
3990 3991 3992
{
	scsi_qla_host_t *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
3993
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
3994
	struct fc_port *sess = cmd->sess;
3995 3996 3997 3998 3999 4000
	struct atio_from_isp *atio = &cmd->atio;
	unsigned char *cdb;
	unsigned long flags;
	uint32_t data_length;
	int ret, fcp_task_attr, data_dir, bidi = 0;

4001
	cmd->cmd_in_wq = 0;
4002
	cmd->trc_flags |= TRC_DO_WORK;
4003 4004 4005
	if (tgt->tgt_stop)
		goto out_term;

4006
	if (cmd->aborted) {
4007 4008 4009 4010 4011 4012
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
		    "cmd with tag %u is aborted\n",
		    cmd->atio.u.isp24.exchange_addr);
		goto out_term;
	}

4013
	spin_lock_init(&cmd->cmd_lock);
4014
	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
4015
	cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035
	cmd->unpacked_lun = scsilun_to_int(
	    (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);

	if (atio->u.isp24.fcp_cmnd.rddata &&
	    atio->u.isp24.fcp_cmnd.wrdata) {
		bidi = 1;
		data_dir = DMA_TO_DEVICE;
	} else if (atio->u.isp24.fcp_cmnd.rddata)
		data_dir = DMA_FROM_DEVICE;
	else if (atio->u.isp24.fcp_cmnd.wrdata)
		data_dir = DMA_TO_DEVICE;
	else
		data_dir = DMA_NONE;

	fcp_task_attr = qlt_get_fcp_task_attr(vha,
	    atio->u.isp24.fcp_cmnd.task_attr);
	data_length = be32_to_cpu(get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]));

4036 4037
	ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
				          fcp_task_attr, data_dir, bidi);
4038 4039 4040 4041 4042
	if (ret != 0)
		goto out_term;
	/*
	 * Drop extra session reference from qla_tgt_handle_cmd_for_atio*(
	 */
4043
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4044
	ha->tgt.tgt_ops->put_sess(sess);
4045
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4046 4047 4048
	return;

out_term:
4049
	ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
4050
	/*
4051 4052
	 * cmd has not sent to target yet, so pass NULL as the second
	 * argument to qlt_send_term_exchange() and free the memory here.
4053
	 */
4054
	cmd->trc_flags |= TRC_DO_WORK_ERR;
4055
	spin_lock_irqsave(&ha->hardware_lock, flags);
4056
	qlt_send_term_exchange(vha, NULL, &cmd->atio, 1, 0);
4057 4058

	qlt_decr_num_pend_cmds(vha);
4059 4060
	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4061 4062

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4063
	ha->tgt.tgt_ops->put_sess(sess);
4064
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4065 4066 4067 4068 4069
}

static void qlt_do_work(struct work_struct *work)
{
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
4070 4071 4072 4073 4074 4075
	scsi_qla_host_t *vha = cmd->vha;
	unsigned long flags;

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_del(&cmd->cmd_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4076 4077 4078 4079 4080

	__qlt_do_work(cmd);
}

static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
4081
				       struct fc_port *sess,
4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097
				       struct atio_from_isp *atio)
{
	struct se_session *se_sess = sess->se_sess;
	struct qla_tgt_cmd *cmd;
	int tag;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
	if (tag < 0)
		return NULL;

	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
	memset(cmd, 0, sizeof(struct qla_tgt_cmd));

	memcpy(&cmd->atio, atio, sizeof(*atio));
	cmd->state = QLA_TGT_STATE_NEW;
	cmd->tgt = vha->vha_tgt.qla_tgt;
4098
	qlt_incr_num_pend_cmds(vha);
4099 4100 4101 4102 4103 4104
	cmd->vha = vha;
	cmd->se_cmd.map_tag = tag;
	cmd->sess = sess;
	cmd->loop_id = sess->loop_id;
	cmd->conf_compl_supported = sess->conf_compl_supported;

4105
	cmd->trc_flags = 0;
4106 4107 4108 4109
	cmd->jiffies_at_alloc = get_jiffies_64();

	cmd->reset_count = vha->hw->chip_reset;

4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121
	return cmd;
}

static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *,
			  uint16_t);

static void qlt_create_sess_from_atio(struct work_struct *work)
{
	struct qla_tgt_sess_op *op = container_of(work,
					struct qla_tgt_sess_op, work);
	scsi_qla_host_t *vha = op->vha;
	struct qla_hw_data *ha = vha->hw;
4122
	struct fc_port *sess;
4123 4124 4125 4126
	struct qla_tgt_cmd *cmd;
	unsigned long flags;
	uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;

4127 4128 4129 4130 4131 4132 4133 4134 4135 4136 4137
	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_del(&op->cmd_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	if (op->aborted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
		    "sess_op with tag %u is aborted\n",
		    op->atio.u.isp24.exchange_addr);
		goto out_term;
	}

4138
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
4139 4140 4141
	    "qla_target(%d): Unable to find wwn login"
	    " (s_id %x:%x:%x), trying to create it manually\n",
	    vha->vp_idx, s_id[0], s_id[1], s_id[2]);
4142 4143 4144

	if (op->atio.u.raw.entry_count > 1) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
4145
		    "Dropping multy entry atio %p\n", &op->atio);
4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160 4161 4162
		goto out_term;
	}

	sess = qlt_make_local_sess(vha, s_id);
	/* sess has an extra creation ref. */

	if (!sess)
		goto out_term;
	/*
	 * Now obtain a pre-allocated session tag using the original op->atio
	 * packet header, and dispatch into __qlt_do_work() using the existing
	 * process context.
	 */
	cmd = qlt_get_tag(vha, sess, &op->atio);
	if (!cmd) {
		spin_lock_irqsave(&ha->hardware_lock, flags);
		qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);
4163
		ha->tgt.tgt_ops->put_sess(sess);
4164 4165 4166 4167
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		kfree(op);
		return;
	}
4168

4169
	/*
4170
	 * __qlt_do_work() will call qlt_put_sess() to release
4171 4172 4173 4174 4175 4176 4177
	 * the extra reference taken above by qlt_make_local_sess()
	 */
	__qlt_do_work(cmd);
	kfree(op);
	return;
out_term:
	spin_lock_irqsave(&ha->hardware_lock, flags);
4178
	qlt_send_term_exchange(vha, NULL, &op->atio, 1, 0);
4179
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4180
	kfree(op);
4181 4182 4183 4184 4185 4186
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
4187
	struct qla_hw_data *ha = vha->hw;
4188
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4189
	struct fc_port *sess;
4190
	struct qla_tgt_cmd *cmd;
4191
	unsigned long flags;
4192 4193

	if (unlikely(tgt->tgt_stop)) {
4194
		ql_dbg(ql_dbg_io, vha, 0x3061,
4195 4196 4197 4198
		    "New command while device %p is shutting down\n", tgt);
		return -EFAULT;
	}

4199 4200 4201 4202 4203 4204 4205 4206
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
	if (unlikely(!sess)) {
		struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op),
						     GFP_ATOMIC);
		if (!op)
			return -ENOMEM;

		memcpy(&op->atio, atio, sizeof(*atio));
4207
		op->vha = vha;
4208

4209
		spin_lock_irqsave(&vha->cmd_list_lock, flags);
4210
		list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
4211
		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4212

4213 4214 4215 4216
		INIT_WORK(&op->work, qlt_create_sess_from_atio);
		queue_work(qla_tgt_wq, &op->work);
		return 0;
	}
4217 4218 4219

	/* Another WWN used to have our s_id. Our PLOGI scheduled its
	 * session deletion, but it's still in sess_del_work wq */
4220
	if (sess->deleted) {
4221
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
4222 4223 4224 4225 4226
		    "New command while old session %p is being deleted\n",
		    sess);
		return -EFAULT;
	}

4227 4228 4229
	/*
	 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
	 */
4230
	if (!kref_get_unless_zero(&sess->sess_kref)) {
4231
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
4232 4233 4234 4235 4236
		    "%s: kref_get fail, %8phC oxid %x \n",
		    __func__, sess->port_name,
		     be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
		return -EFAULT;
	}
4237 4238

	cmd = qlt_get_tag(vha, sess, atio);
4239
	if (!cmd) {
4240
		ql_dbg(ql_dbg_io, vha, 0x3062,
4241
		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
4242 4243 4244
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		ha->tgt.tgt_ops->put_sess(sess);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4245 4246 4247
		return -ENOMEM;
	}

4248
	cmd->cmd_in_wq = 1;
4249
	cmd->trc_flags |= TRC_NEW_CMD;
4250 4251
	cmd->se_cmd.cpuid = ha->msix_count ?
		ha->tgt.rspq_vector_cpuid : WORK_CPU_UNBOUND;
4252

4253
	spin_lock_irqsave(&vha->cmd_list_lock, flags);
4254
	list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
4255
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4256

4257
	INIT_WORK(&cmd->work, qlt_do_work);
4258 4259 4260 4261 4262 4263 4264 4265 4266 4267
	if (ha->msix_count) {
		if (cmd->atio.u.isp24.fcp_cmnd.rddata)
			queue_work_on(smp_processor_id(), qla_tgt_wq,
			    &cmd->work);
		else
			queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
			    &cmd->work);
	} else {
		queue_work(qla_tgt_wq, &cmd->work);
	}
4268 4269 4270 4271 4272
	return 0;

}

/* ha->hardware_lock supposed to be held on entry */
4273
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
4274 4275 4276 4277 4278
	int fn, void *iocb, int flags)
{
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
4279
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4280 4281 4282 4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293 4294 4295 4296 4297 4298
	int res;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (!mcmd) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
		    "qla_target(%d): Allocation of management "
		    "command failed, some commands and their data could "
		    "leak\n", vha->vp_idx);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));
	mcmd->sess = sess;

	if (iocb) {
		memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
		    sizeof(mcmd->orig_iocb.imm_ntfy));
	}
	mcmd->tmr_func = fn;
	mcmd->flags = flags;
4299
	mcmd->reset_count = vha->hw->chip_reset;
4300 4301 4302

	switch (fn) {
	case QLA_TGT_LUN_RESET:
4303 4304
	    abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
	    break;
4305 4306
	}

4307
	res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, mcmd->tmr_func, 0);
4308 4309 4310 4311 4312 4313 4314 4315 4316 4317 4318 4319 4320 4321 4322 4323 4324
	if (res != 0) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
		    "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
		    sess->vha->vp_idx, res);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt;
4325
	struct fc_port *sess;
4326
	u64 unpacked_lun;
4327
	int fn;
4328
	unsigned long flags;
4329

4330
	tgt = vha->vha_tgt.qla_tgt;
4331 4332

	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
4333 4334

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4335 4336
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    a->u.isp24.fcp_hdr.s_id);
4337 4338
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

4339 4340
	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
4341 4342 4343 4344 4345 4346 4347 4348 4349

	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
		    "qla_target(%d): task mgmt fn 0x%x for "
		    "non-existant session\n", vha->vp_idx, fn);
		return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
		    sizeof(struct atio_from_isp));
	}

4350
	if (sess->deleted)
4351 4352
		return -EFAULT;

4353 4354 4355 4356 4357
	return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
}

/* ha->hardware_lock supposed to be held on entry */
static int __qlt_abort_task(struct scsi_qla_host *vha,
4358
	struct imm_ntfy_from_isp *iocb, struct fc_port *sess)
4359 4360 4361 4362
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
4363
	u64 unpacked_lun;
4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4378
	int rc;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
	    sizeof(mcmd->orig_iocb.imm_ntfy));

4379 4380
	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
4381
	mcmd->reset_count = vha->hw->chip_reset;
4382
	mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK;
4383

4384
	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func,
4385 4386 4387 4388 4389 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401
	    le16_to_cpu(iocb->u.isp2x.seq_id));
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
		    vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
4402
	struct fc_port *sess;
4403
	int loop_id;
4404
	unsigned long flags;
4405 4406 4407

	loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);

4408
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4409
	sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
4410 4411
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

4412 4413 4414 4415
	if (sess == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
		    "qla_target(%d): task abort for unexisting "
		    "session\n", vha->vp_idx);
4416
		return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
4417 4418 4419 4420 4421 4422
		    QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
	}

	return __qlt_abort_task(vha, iocb, sess);
}

4423 4424
void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
{
4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438
	if (rc != MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
			"%s: se_sess %p / sess %p from"
			" port %8phC loop_id %#04x s_id %02x:%02x:%02x"
			" LOGO failed: %#x\n",
			__func__,
			fcport->se_sess,
			fcport,
			fcport->port_name, fcport->loop_id,
			fcport->d_id.b.domain, fcport->d_id.b.area,
			fcport->d_id.b.al_pa, rc);
	}

	fcport->logout_completed = 1;
4439 4440 4441 4442 4443 4444 4445 4446 4447
}

/*
* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
*
* Schedules sessions with matching port_id/loop_id but different wwn for
* deletion. Returns existing session with matching wwn if present.
* Null otherwise.
*/
4448 4449
struct fc_port *
qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
4450
    port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess)
4451
{
4452
	struct fc_port *sess = NULL, *other_sess;
4453 4454
	uint64_t other_wwn;

4455 4456
	*conflict_sess = NULL;

4457
	list_for_each_entry(other_sess, &vha->vp_fcports, list) {
4458 4459 4460 4461 4462 4463 4464 4465 4466 4467

		other_wwn = wwn_to_u64(other_sess->port_name);

		if (wwn == other_wwn) {
			WARN_ON(sess);
			sess = other_sess;
			continue;
		}

		/* find other sess with nport_id collision */
4468
		if (port_id.b24 == other_sess->d_id.b24) {
4469
			if (loop_id != other_sess->loop_id) {
4470
				ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000c,
4471 4472 4473 4474 4475 4476 4477 4478 4479 4480 4481 4482 4483
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);

				/*
				 * logout_on_delete is set by default, but another
				 * session that has the same s_id/loop_id combo
				 * might have cleared it when requested this session
				 * deletion, so don't touch it
				 */
				qlt_schedule_sess_for_deletion(other_sess, true);
			} else {
				/*
				 * Another wwn used to have our s_id/loop_id
4484
				 * kill the session, but don't free the loop_id
4485
				 */
4486
				ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01b,
4487 4488 4489 4490
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);


4491 4492
				other_sess->keep_nport_handle = 1;
				*conflict_sess = other_sess;
4493 4494 4495 4496 4497 4498 4499
				qlt_schedule_sess_for_deletion(other_sess,
				    true);
			}
			continue;
		}

		/* find other sess with nport handle collision */
4500 4501 4502
		if ((loop_id == other_sess->loop_id) &&
			(loop_id != FC_NO_LOOP_ID)) {
			ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000d,
4503 4504 4505 4506 4507 4508 4509 4510 4511 4512 4513 4514
			       "Invalidating sess %p loop_id %d wwn %llx.\n",
			       other_sess, other_sess->loop_id, other_wwn);

			/* Same loop_id but different s_id
			 * Ok to kill and logout */
			qlt_schedule_sess_for_deletion(other_sess, true);
		}
	}

	return sess;
}

4515 4516 4517 4518 4519 4520 4521
/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;
	uint32_t key;
	int count = 0;
4522
	unsigned long flags;
4523 4524 4525 4526 4527

	key = (((u32)s_id->b.domain << 16) |
	       ((u32)s_id->b.area   <<  8) |
	       ((u32)s_id->b.al_pa));

4528
	spin_lock_irqsave(&vha->cmd_list_lock, flags);
4529 4530
	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
		uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
4531

4532 4533 4534 4535 4536
		if (op_key == key) {
			op->aborted = true;
			count++;
		}
	}
4537 4538 4539 4540 4541 4542 4543 4544 4545

	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
		uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
		if (op_key == key) {
			op->aborted = true;
			count++;
		}
	}

4546 4547 4548
	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
		if (cmd_key == key) {
4549
			cmd->aborted = 1;
4550 4551 4552
			count++;
		}
	}
4553
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4554 4555 4556 4557

	return count;
}

4558 4559 4560 4561 4562 4563
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
4564
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4565
	struct qla_hw_data *ha = vha->hw;
4566
	struct fc_port *sess = NULL, *conflict_sess = NULL;
4567 4568 4569 4570
	uint64_t wwn;
	port_id_t port_id;
	uint16_t loop_id;
	uint16_t wd3_lo;
4571
	int res = 0;
4572
	struct qlt_plogi_ack_t *pla;
4573
	unsigned long flags;
4574

4575 4576 4577 4578 4579 4580 4581 4582 4583
	wwn = wwn_to_u64(iocb->u.isp24.port_name);

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area   = iocb->u.isp24.port_id[1];
	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

4584 4585 4586 4587 4588 4589
	ql_dbg(ql_dbg_disc, vha, 0xf026,
	    "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n",
	    vha->vp_idx, iocb->u.isp24.port_id[2],
		iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
		   iocb->u.isp24.status_subcode, loop_id,
		iocb->u.isp24.port_name);
4590

4591 4592 4593
	/* res = 1 means ack at the end of thread
	 * res = 0 means ack async/later.
	 */
4594 4595
	switch (iocb->u.isp24.status_subcode) {
	case ELS_PLOGI:
4596

4597 4598 4599
		/* Mark all stale commands in qla_tgt_wq for deletion */
		abort_cmds_for_s_id(vha, &port_id);

4600 4601
		if (wwn) {
			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
4602 4603
			sess = qlt_find_sess_invalidate_other(vha, wwn,
				port_id, loop_id, &conflict_sess);
4604 4605
			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
		}
4606

4607
		if (IS_SW_RESV_ADDR(port_id)) {
4608 4609 4610 4611
			res = 1;
			break;
		}

4612 4613
		pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
		if (!pla) {
4614 4615 4616 4617 4618 4619
			qlt_send_term_imm_notif(vha, iocb, 1);
			break;
		}

		res = 0;

4620 4621
		if (conflict_sess) {
			conflict_sess->login_gen++;
4622
			qlt_plogi_ack_link(vha, pla, conflict_sess,
4623 4624
				QLT_PLOGI_LINK_CONFLICT);
		}
4625

4626 4627 4628 4629 4630
		if (!sess) {
			pla->ref_count++;
			qla24xx_post_newsess_work(vha, &port_id,
				iocb->u.isp24.port_name, pla);
			res = 0;
4631
			break;
4632
		}
4633 4634

		qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
4635 4636 4637 4638 4639 4640 4641 4642 4643 4644 4645 4646 4647 4648 4649 4650 4651 4652 4653 4654 4655 4656 4657 4658 4659 4660 4661 4662 4663 4664 4665
		sess->fw_login_state = DSC_LS_PLOGI_PEND;
		sess->d_id = port_id;
		sess->login_gen++;

		switch (sess->disc_state) {
		case DSC_DELETED:
			qlt_plogi_ack_unref(vha, pla);
			break;

		default:
			/*
			 * Under normal circumstances we want to release nport handle
			 * during LOGO process to avoid nport handle leaks inside FW.
			 * The exception is when LOGO is done while another PLOGI with
			 * the same nport handle is waiting as might be the case here.
			 * Note: there is always a possibily of a race where session
			 * deletion has already started for other reasons (e.g. ACL
			 * removal) and now PLOGI arrives:
			 * 1. if PLOGI arrived in FW after nport handle has been freed,
			 *    FW must have assigned this PLOGI a new/same handle and we
			 *    can proceed ACK'ing it as usual when session deletion
			 *    completes.
			 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
			 *    bit reached it, the handle has now been released. We'll
			 *    get an error when we ACK this PLOGI. Nothing will be sent
			 *    back to initiator. Initiator should eventually retry
			 *    PLOGI and situation will correct itself.
			 */
			sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
			   (sess->d_id.b24 == port_id.b24));

4666 4667 4668
			ql_dbg(ql_dbg_disc, vha, 0x20f9,
			    "%s %d %8phC post del sess\n",
			    __func__, __LINE__, sess->port_name);
4669 4670 4671 4672 4673 4674


			qlt_schedule_sess_for_deletion_lock(sess);
			break;
		}

4675 4676
		break;

4677
	case ELS_PRLI:
4678 4679
		wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);

4680 4681
		if (wwn) {
			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
4682 4683
			sess = qlt_find_sess_invalidate_other(vha, wwn, port_id,
				loop_id, &conflict_sess);
4684 4685
			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
		}
4686 4687 4688 4689 4690 4691 4692 4693 4694

		if (conflict_sess) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
			    "PRLI with conflicting sess %p port %8phC\n",
			    conflict_sess, conflict_sess->port_name);
			qlt_send_term_imm_notif(vha, iocb, 1);
			res = 0;
			break;
		}
4695 4696

		if (sess != NULL) {
4697 4698
			if (sess->fw_login_state != DSC_LS_PLOGI_PEND &&
			    sess->fw_login_state != DSC_LS_PLOGI_COMP) {
4699 4700 4701 4702 4703
				/*
				 * Impatient initiator sent PRLI before last
				 * PLOGI could finish. Will force him to re-try,
				 * while last one finishes.
				 */
4704
				ql_log(ql_log_warn, sess->vha, 0xf095,
4705 4706 4707 4708 4709 4710 4711 4712 4713 4714 4715
				    "sess %p PRLI received, before plogi ack.\n",
				    sess);
				qlt_send_term_imm_notif(vha, iocb, 1);
				res = 0;
				break;
			}

			/*
			 * This shouldn't happen under normal circumstances,
			 * since we have deleted the old session during PLOGI
			 */
4716
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
4717 4718 4719 4720 4721
			    "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
			    sess->loop_id, sess, iocb->u.isp24.nport_handle);

			sess->local = 0;
			sess->loop_id = loop_id;
4722
			sess->d_id = port_id;
4723
			sess->fw_login_state = DSC_LS_PRLI_PEND;
4724 4725 4726 4727

			if (wd3_lo & BIT_7)
				sess->conf_compl_supported = 1;

4728 4729 4730 4731
			if ((wd3_lo & BIT_4) == 0)
				sess->port_type = FCT_INITIATOR;
			else
				sess->port_type = FCT_TARGET;
4732 4733 4734 4735 4736
		}
		res = 1; /* send notify ack */

		/* Make session global (not used in fabric mode) */
		if (ha->current_topology != ISP_CFG_F) {
4737
			if (sess) {
4738
				ql_dbg(ql_dbg_disc, vha, 0x20fa,
4739 4740 4741 4742 4743 4744 4745 4746 4747 4748
				    "%s %d %8phC post nack\n",
				    __func__, __LINE__, sess->port_name);
				qla24xx_post_nack_work(vha, sess, iocb,
					SRB_NACK_PRLI);
				res = 0;
			} else {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
4749
		} else {
4750
			if (sess) {
4751
				ql_dbg(ql_dbg_disc, vha, 0x20fb,
4752 4753
				    "%s %d %8phC post nack\n",
				    __func__, __LINE__, sess->port_name);
4754 4755 4756 4757 4758
				qla24xx_post_nack_work(vha, sess, iocb,
					SRB_NACK_PRLI);
				res = 0;
			}
		}
4759 4760
		break;

4761 4762 4763 4764 4765 4766 4767 4768 4769
	case ELS_TPRLO:
		if (le16_to_cpu(iocb->u.isp24.flags) &
			NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
			loop_id = 0xFFFF;
			qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS);
			res = 1;
			break;
		}
		/* drop through */
4770 4771
	case ELS_LOGO:
	case ELS_PRLO:
4772 4773 4774 4775 4776 4777 4778 4779 4780 4781 4782
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = qla2x00_find_fcport_by_loopid(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		if (sess) {
			sess->login_gen++;
			sess->fw_login_state = DSC_LS_LOGO_PEND;
			sess->logo_ack_needed = 1;
			memcpy(sess->iocb, iocb, IOCB_SIZE);
		}

4783
		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
4784

4785
		ql_dbg(ql_dbg_disc, vha, 0x20fc,
4786 4787 4788
		    "%s: logo %llx res %d sess %p ",
		    __func__, wwn, res, sess);
		if (res == 0) {
4789 4790 4791 4792
			/*
			 * cmd went upper layer, look for qlt_xmit_tm_rsp()
			 * for LOGO_ACK & sess delete
			 */
4793 4794 4795
			BUG_ON(!sess);
			res = 0;
		} else {
4796
			/* cmd did not go to upper layer. */
4797 4798 4799 4800 4801 4802
			if (sess) {
				qlt_schedule_sess_for_deletion_lock(sess);
				res = 0;
			}
			/* else logo will be ack */
		}
4803 4804 4805 4806
		break;
	case ELS_PDISC:
	case ELS_ADISC:
	{
4807
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4808 4809 4810 4811 4812
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
4813 4814 4815 4816

		sess = qla2x00_find_fcport_by_wwpn(vha,
		    iocb->u.isp24.port_name, 1);
		if (sess) {
4817
			ql_dbg(ql_dbg_disc, vha, 0x20fd,
4818 4819 4820 4821 4822
				"sess %p lid %d|%d DS %d LS %d\n",
				sess, sess->loop_id, loop_id,
				sess->disc_state, sess->fw_login_state);
		}

4823 4824 4825 4826
		res = 1; /* send notify ack */
		break;
	}

4827
	case ELS_FLOGI:	/* should never happen */
4828 4829 4830 4831 4832 4833 4834 4835 4836 4837 4838 4839 4840 4841 4842 4843 4844 4845 4846 4847 4848 4849 4850 4851 4852 4853 4854 4855 4856 4857 4858 4859 4860 4861 4862 4863 4864 4865
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
		    "qla_target(%d): Unsupported ELS command %x "
		    "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
		break;
	}

	return res;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t add_flags = 0;
	int send_notify_ack = 1;
	uint16_t status;

	status = le16_to_cpu(iocb->u.isp2x.status);
	switch (status) {
	case IMM_NTFY_LIP_RESET:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
		    "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
		    vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_LIP_LINK_REINIT:
	{
4866
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4867 4868 4869 4870 4871 4872 4873 4874 4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887 4888 4889 4890 4891 4892 4893 4894 4895 4896 4897 4898 4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4924 4925 4926 4927 4928 4929 4930 4931 4932 4933 4934 4935 4936 4937 4938 4939 4940 4941 4942 4943 4944 4945 4946 4947 4948 4949 4950 4951 4952 4953 4954 4955 4956 4957 4958 4959 4960 4961 4962 4963 4964 4965 4966 4967 4968 4969 4970 4971 4972 4973 4974
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
		    "qla_target(%d): LINK REINIT (loop %#x, "
		    "subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
		}
		memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
		tgt->link_reinit_iocb_pending = 1;
		/*
		 * QLogic requires to wait after LINK REINIT for possible
		 * PDISC or ADISC ELS commands
		 */
		send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_PORT_LOGOUT:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
		    "qla_target(%d): Port logout (loop "
		    "%#x, subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_TPRLO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
		    "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_PORT_CONFIG:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
		    "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
		    status);
		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_LOGO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
		    "qla_target(%d): Link failure detected\n",
		    vha->vp_idx);
		/* I_T nexus loss */
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_IOCB_OVERFLOW:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
		    "qla_target(%d): Cannot provide requested "
		    "capability (IOCB overflowed the immediate notify "
		    "resource count)\n", vha->vp_idx);
		break;

	case IMM_NTFY_ABORT_TASK:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
		    "qla_target(%d): Abort Task (S %08x I %#x -> "
		    "L %#x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp2x.seq_id),
		    GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
		    le16_to_cpu(iocb->u.isp2x.lun));
		if (qlt_abort_task(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_RESOURCE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
		    "qla_target(%d): Out of resources, host %ld\n",
		    vha->vp_idx, vha->host_no);
		break;

	case IMM_NTFY_MSG_RX:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
		    "qla_target(%d): Immediate notify task %x\n",
		    vha->vp_idx, iocb->u.isp2x.task_flags);
		if (qlt_handle_task_mgmt(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_ELS:
		if (qlt_24xx_handle_els(vha, iocb) == 0)
			send_notify_ack = 0;
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
		    "qla_target(%d): Received unknown immediate "
		    "notify status %x\n", vha->vp_idx, status);
		break;
	}

	if (send_notify_ack)
		qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 * This function sends busy to ISP 2xxx or 24xx.
 */
4975
static int __qlt_send_busy(struct scsi_qla_host *vha,
4976 4977 4978 4979 4980
	struct atio_from_isp *atio, uint16_t status)
{
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
4981
	struct fc_port *sess = NULL;
4982
	unsigned long flags;
4983
	u16 temp;
4984

4985
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4986 4987
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    atio->u.isp24.fcp_hdr.s_id);
4988
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4989
	if (!sess) {
4990
		qlt_send_term_exchange(vha, NULL, atio, 1, 0);
4991
		return 0;
4992 4993 4994 4995 4996
	}
	/* Sending marker isn't necessary, since we called from ISR */

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
4997
		ql_dbg(ql_dbg_io, vha, 0x3063,
4998 4999
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
5000
		return -ENOMEM;
5001 5002
	}

5003
	vha->tgt_counters.num_q_full_sent++;
5004 5005 5006 5007 5008 5009
	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = sess->loop_id;
5010
	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
5011 5012 5013 5014 5015
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
5016
	temp = (atio->u.isp24.attr << 9) |
5017
		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
5018 5019
		CTIO7_FLAGS_DONT_RET_CTIO;
	ctio24->u.status1.flags = cpu_to_le16(temp);
5020 5021 5022 5023 5024 5025
	/*
	 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
	 * if the explicit conformation is used.
	 */
	ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	ctio24->u.status1.scsi_status = cpu_to_le16(status);
5026 5027
	/* Memory Barrier */
	wmb();
5028
	qla2x00_start_iocbs(vha, vha->req);
5029 5030 5031 5032 5033 5034 5035 5036 5037 5038 5039 5040 5041 5042
	return 0;
}

/*
 * This routine is used to allocate a command for either a QFull condition
 * (ie reply SAM_STAT_BUSY) or to terminate an exchange that did not go
 * out previously.
 */
static void
qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_hw_data *ha = vha->hw;
5043
	struct fc_port *sess;
5044 5045 5046 5047 5048 5049 5050 5051 5052 5053 5054 5055 5056
	struct se_session *se_sess;
	struct qla_tgt_cmd *cmd;
	int tag;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_io, vha, 0x300a,
			"New command while device %p is shutting down\n", tgt);
		return;
	}

	if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
		vha->hw->tgt.num_qfull_cmds_dropped++;
		if (vha->hw->tgt.num_qfull_cmds_dropped >
5057 5058
			vha->qla_stats.stat_max_qfull_cmds_dropped)
			vha->qla_stats.stat_max_qfull_cmds_dropped =
5059 5060 5061 5062 5063 5064 5065 5066 5067 5068 5069 5070 5071 5072 5073 5074 5075 5076 5077 5078 5079 5080 5081 5082 5083 5084 5085 5086 5087 5088
				vha->hw->tgt.num_qfull_cmds_dropped;

		ql_dbg(ql_dbg_io, vha, 0x3068,
			"qla_target(%d): %s: QFull CMD dropped[%d]\n",
			vha->vp_idx, __func__,
			vha->hw->tgt.num_qfull_cmds_dropped);

		qlt_chk_exch_leak_thresh_hold(vha);
		return;
	}

	sess = ha->tgt.tgt_ops->find_sess_by_s_id
		(vha, atio->u.isp24.fcp_hdr.s_id);
	if (!sess)
		return;

	se_sess = sess->se_sess;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
	if (tag < 0)
		return;

	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
	if (!cmd) {
		ql_dbg(ql_dbg_io, vha, 0x3009,
			"qla_target(%d): %s: Allocation of cmd failed\n",
			vha->vp_idx, __func__);

		vha->hw->tgt.num_qfull_cmds_dropped++;
		if (vha->hw->tgt.num_qfull_cmds_dropped >
5089 5090
			vha->qla_stats.stat_max_qfull_cmds_dropped)
			vha->qla_stats.stat_max_qfull_cmds_dropped =
5091 5092 5093 5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104 5105 5106 5107 5108 5109 5110 5111 5112 5113 5114 5115 5116 5117 5118
				vha->hw->tgt.num_qfull_cmds_dropped;

		qlt_chk_exch_leak_thresh_hold(vha);
		return;
	}

	memset(cmd, 0, sizeof(struct qla_tgt_cmd));

	qlt_incr_num_pend_cmds(vha);
	INIT_LIST_HEAD(&cmd->cmd_list);
	memcpy(&cmd->atio, atio, sizeof(*atio));

	cmd->tgt = vha->vha_tgt.qla_tgt;
	cmd->vha = vha;
	cmd->reset_count = vha->hw->chip_reset;
	cmd->q_full = 1;

	if (qfull) {
		cmd->q_full = 1;
		/* NOTE: borrowing the state field to carry the status */
		cmd->state = status;
	} else
		cmd->term_exchg = 1;

	list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);

	vha->hw->tgt.num_qfull_cmds_alloc++;
	if (vha->hw->tgt.num_qfull_cmds_alloc >
5119 5120
		vha->qla_stats.stat_max_qfull_cmds_alloc)
		vha->qla_stats.stat_max_qfull_cmds_alloc =
5121 5122 5123 5124 5125 5126 5127 5128 5129 5130 5131 5132 5133 5134 5135 5136 5137 5138 5139 5140 5141 5142 5143 5144 5145 5146 5147 5148 5149 5150 5151 5152 5153 5154 5155 5156 5157 5158 5159 5160 5161 5162 5163 5164 5165 5166 5167 5168 5169 5170 5171 5172 5173 5174 5175 5176 5177 5178 5179 5180 5181 5182 5183 5184 5185 5186 5187 5188 5189 5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200
			vha->hw->tgt.num_qfull_cmds_alloc;
}

int
qlt_free_qfull_cmds(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct qla_tgt_cmd *cmd, *tcmd;
	struct list_head free_list;
	int rc = 0;

	if (list_empty(&ha->tgt.q_full_list))
		return 0;

	INIT_LIST_HEAD(&free_list);

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);

	if (list_empty(&ha->tgt.q_full_list)) {
		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
		return 0;
	}

	list_for_each_entry_safe(cmd, tcmd, &ha->tgt.q_full_list, cmd_list) {
		if (cmd->q_full)
			/* cmd->state is a borrowed field to hold status */
			rc = __qlt_send_busy(vha, &cmd->atio, cmd->state);
		else if (cmd->term_exchg)
			rc = __qlt_send_term_exchange(vha, NULL, &cmd->atio);

		if (rc == -ENOMEM)
			break;

		if (cmd->q_full)
			ql_dbg(ql_dbg_io, vha, 0x3006,
			    "%s: busy sent for ox_id[%04x]\n", __func__,
			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
		else if (cmd->term_exchg)
			ql_dbg(ql_dbg_io, vha, 0x3007,
			    "%s: Term exchg sent for ox_id[%04x]\n", __func__,
			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
		else
			ql_dbg(ql_dbg_io, vha, 0x3008,
			    "%s: Unexpected cmd in QFull list %p\n", __func__,
			    cmd);

		list_del(&cmd->cmd_list);
		list_add_tail(&cmd->cmd_list, &free_list);

		/* piggy back on hardware_lock for protection */
		vha->hw->tgt.num_qfull_cmds_alloc--;
	}
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	cmd = NULL;

	list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
		list_del(&cmd->cmd_list);
		/* This cmd was never sent to TCM.  There is no need
		 * to schedule free or call free_cmd
		 */
		qlt_free_cmd(cmd);
	}
	return rc;
}

static void
qlt_send_busy(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status)
{
	int rc = 0;

	rc = __qlt_send_busy(vha, atio, status);
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, atio, status, 1);
}

static int
qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
5201
	struct atio_from_isp *atio, bool ha_locked)
5202 5203 5204
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t status;
5205
	unsigned long flags;
5206 5207 5208 5209

	if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
		return 0;

5210 5211
	if (!ha_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);
5212 5213
	status = temp_sam_status;
	qlt_send_busy(vha, atio, status);
5214 5215 5216
	if (!ha_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

5217
	return 1;
5218 5219 5220 5221 5222
}

/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5223
	struct atio_from_isp *atio, uint8_t ha_locked)
5224 5225
{
	struct qla_hw_data *ha = vha->hw;
5226
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5227
	int rc;
5228
	unsigned long flags;
5229 5230

	if (unlikely(tgt == NULL)) {
5231
		ql_dbg(ql_dbg_tgt, vha, 0x3064,
5232 5233 5234 5235 5236 5237 5238 5239
		    "ATIO pkt, but no tgt (ha %p)", ha);
		return;
	}
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can stuck.
	 */

5240
	tgt->atio_irq_cmd_count++;
5241 5242 5243 5244 5245

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
		if (unlikely(atio->u.isp24.exchange_addr ==
		    ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
5246
			ql_dbg(ql_dbg_io, vha, 0x3065,
5247 5248 5249
			    "qla_target(%d): ATIO_TYPE7 "
			    "received with UNKNOWN exchange address, "
			    "sending QUEUE_FULL\n", vha->vp_idx);
5250 5251
			if (!ha_locked)
				spin_lock_irqsave(&ha->hardware_lock, flags);
5252
			qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
5253 5254
			if (!ha_locked)
				spin_unlock_irqrestore(&ha->hardware_lock, flags);
5255 5256
			break;
		}
5257 5258 5259 5260



		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
5261
			rc = qlt_chk_qfull_thresh_hold(vha, atio, ha_locked);
5262
			if (rc != 0) {
5263
				tgt->atio_irq_cmd_count--;
5264 5265
				return;
			}
5266
			rc = qlt_handle_cmd_for_atio(vha, atio);
5267
		} else {
5268
			rc = qlt_handle_task_mgmt(vha, atio);
5269
		}
5270 5271
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
5272 5273 5274 5275
				if (!ha_locked)
					spin_lock_irqsave
						(&ha->hardware_lock, flags);

5276 5277 5278
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(vha, atio, SAM_STAT_BUSY);
#else
5279
				qlt_send_term_exchange(vha, NULL, atio, 1, 0);
5280
#endif
5281 5282 5283 5284 5285

				if (!ha_locked)
					spin_unlock_irqrestore
						(&ha->hardware_lock, flags);

5286 5287 5288 5289 5290 5291 5292 5293 5294 5295 5296
			} else {
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe059,
					    "qla_target: Unable to send "
					    "command to target for req, "
					    "ignoring.\n");
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe05a,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status.\n", vha->vp_idx);
5297 5298 5299
					if (!ha_locked)
						spin_lock_irqsave(
						    &ha->hardware_lock, flags);
5300
					qlt_send_busy(vha, atio, SAM_STAT_BUSY);
5301 5302 5303
					if (!ha_locked)
						spin_unlock_irqrestore(
						    &ha->hardware_lock, flags);
5304 5305 5306 5307 5308 5309 5310 5311 5312 5313 5314 5315 5316 5317 5318 5319
				}
			}
		}
		break;

	case IMMED_NOTIFY_TYPE:
	{
		if (unlikely(atio->u.isp2x.entry_status != 0)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05b,
			    "qla_target(%d): Received ATIO packet %x "
			    "with error status %x\n", vha->vp_idx,
			    atio->u.raw.entry_type,
			    atio->u.isp2x.entry_status);
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
5320 5321 5322

		if (!ha_locked)
			spin_lock_irqsave(&ha->hardware_lock, flags);
5323
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
5324 5325
		if (!ha_locked)
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
5326 5327 5328 5329 5330 5331 5332 5333 5334 5335
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe05c,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

5336
	tgt->atio_irq_cmd_count--;
5337 5338 5339 5340 5341 5342 5343
}

/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
{
	struct qla_hw_data *ha = vha->hw;
5344
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5345 5346 5347 5348 5349 5350 5351 5352 5353 5354 5355 5356 5357 5358 5359 5360

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
		    "qla_target(%d): Response pkt %x received, but no "
		    "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
		return;
	}

	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can stuck.
	 */

	tgt->irq_cmd_count++;

	switch (pkt->entry_type) {
5361
	case CTIO_CRC2:
5362 5363 5364 5365 5366 5367 5368 5369 5370 5371 5372 5373 5374 5375
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case ACCEPT_TGT_IO_TYPE:
	{
		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
		int rc;
		if (atio->u.isp2x.status !=
5376
		    cpu_to_le16(ATIO_CDB_VALID)) {
5377 5378 5379 5380 5381 5382 5383
			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
			    "qla_target(%d): ATIO with error "
			    "status %x received\n", vha->vp_idx,
			    le16_to_cpu(atio->u.isp2x.status));
			break;
		}

5384
		rc = qlt_chk_qfull_thresh_hold(vha, atio, true);
5385 5386 5387 5388 5389
		if (rc != 0) {
			tgt->irq_cmd_count--;
			return;
		}

5390 5391 5392 5393 5394 5395
		rc = qlt_handle_cmd_for_atio(vha, atio);
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(vha, atio, 0);
#else
5396
				qlt_send_term_exchange(vha, NULL, atio, 1, 0);
5397 5398 5399 5400 5401 5402 5403 5404
#endif
			} else {
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe05f,
					    "qla_target: Unable to send "
					    "command to target, sending TERM "
					    "EXCHANGE for rsp\n");
					qlt_send_term_exchange(vha, NULL,
5405
					    atio, 1, 0);
5406 5407 5408 5409 5410 5411 5412 5413 5414 5415 5416 5417 5418 5419 5420 5421 5422 5423 5424 5425 5426 5427 5428 5429 5430 5431 5432 5433 5434 5435 5436 5437 5438 5439 5440 5441 5442 5443 5444 5445 5446 5447 5448 5449
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe060,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status\n", vha->vp_idx);
					qlt_send_busy(vha, atio, 0);
				}
			}
		}
	}
	break;

	case CONTINUE_TGT_IO_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case CTIO_A64_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case IMMED_NOTIFY_TYPE:
		ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
		break;

	case NOTIFY_ACK_TYPE:
		if (tgt->notify_ack_expected > 0) {
			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe036,
			    "NOTIFY_ACK seq %08x status %x\n",
			    le16_to_cpu(entry->u.isp2x.seq_id),
			    le16_to_cpu(entry->u.isp2x.status));
			tgt->notify_ack_expected--;
			if (entry->u.isp2x.status !=
5450
			    cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
5451 5452 5453 5454 5455 5456 5457 5458 5459 5460 5461 5462 5463 5464 5465 5466 5467 5468 5469 5470 5471 5472 5473 5474 5475 5476 5477 5478 5479 5480 5481 5482 5483 5484 5485 5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496 5497 5498 5499 5500 5501 5502 5503 5504 5505 5506 5507 5508 5509 5510 5511 5512 5513 5514 5515 5516 5517 5518 5519 5520 5521 5522 5523 5524 5525 5526 5527 5528 5529
				ql_dbg(ql_dbg_tgt, vha, 0xe061,
				    "qla_target(%d): NOTIFY_ACK "
				    "failed %x\n", vha->vp_idx,
				    le16_to_cpu(entry->u.isp2x.status));
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe062,
			    "qla_target(%d): Unexpected NOTIFY_ACK received\n",
			    vha->vp_idx);
		}
		break;

	case ABTS_RECV_24XX:
		ql_dbg(ql_dbg_tgt, vha, 0xe037,
		    "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
		break;

	case ABTS_RESP_24XX:
		if (tgt->abts_resp_expected > 0) {
			struct abts_resp_from_24xx_fw *entry =
				(struct abts_resp_from_24xx_fw *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe038,
			    "ABTS_RESP_24XX: compl_status %x\n",
			    entry->compl_status);
			tgt->abts_resp_expected--;
			if (le16_to_cpu(entry->compl_status) !=
			    ABTS_RESP_COMPL_SUCCESS) {
				if ((entry->error_subcode1 == 0x1E) &&
				    (entry->error_subcode2 == 0)) {
					/*
					 * We've got a race here: aborted
					 * exchange not terminated, i.e.
					 * response for the aborted command was
					 * sent between the abort request was
					 * received and processed.
					 * Unfortunately, the firmware has a
					 * silly requirement that all aborted
					 * exchanges must be explicitely
					 * terminated, otherwise it refuses to
					 * send responses for the abort
					 * requests. So, we have to
					 * (re)terminate the exchange and retry
					 * the abort response.
					 */
					qlt_24xx_retry_term_exchange(vha,
					    entry);
				} else
					ql_dbg(ql_dbg_tgt, vha, 0xe063,
					    "qla_target(%d): ABTS_RESP_24XX "
					    "failed %x (subcode %x:%x)",
					    vha->vp_idx, entry->compl_status,
					    entry->error_subcode1,
					    entry->error_subcode2);
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe064,
			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
			    "received\n", vha->vp_idx);
		}
		break;

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe065,
		    "qla_target(%d): Received unknown response pkt "
		    "type %x\n", vha->vp_idx, pkt->entry_type);
		break;
	}

	tgt->irq_cmd_count--;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
	uint16_t *mailbox)
{
	struct qla_hw_data *ha = vha->hw;
5530
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5531
	int login_code;
5532

5533
	if (!tgt || tgt->tgt_stop || tgt->tgt_stopped)
5534 5535 5536 5537 5538 5539 5540 5541 5542 5543 5544 5545 5546 5547 5548 5549 5550 5551 5552 5553 5554 5555 5556 5557 5558
		return;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe03a,
		    "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
		return;
	}

	if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
	    IS_QLA2100(ha))
		return;
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can stuck.
	 */

	tgt->irq_cmd_count++;

	switch (code) {
	case MBA_RESET:			/* Reset */
	case MBA_SYSTEM_ERR:		/* System Error */
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
		    "qla_target(%d): System error async event %#x "
5559
		    "occurred", vha->vp_idx, code);
5560 5561 5562 5563 5564 5565 5566 5567
		break;
	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up. */
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LOOP_UP:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
5568
		    "qla_target(%d): Async LOOP_UP occurred "
5569 5570 5571
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
5572 5573 5574 5575 5576 5577 5578 5579 5580 5581 5582 5583 5584
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		break;
	}

	case MBA_LIP_OCCURRED:
	case MBA_LOOP_DOWN:
	case MBA_LIP_RESET:
	case MBA_RSCN_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
5585
		    "qla_target(%d): Async event %#x occurred "
5586 5587 5588
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
5589 5590
		break;

5591
	case MBA_REJECTED_FCP_CMD:
5592 5593 5594 5595 5596
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017,
		    "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)",
		    vha->vp_idx,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
5597 5598 5599 5600 5601

		if (le16_to_cpu(mailbox[3]) == 1) {
			/* exchange starvation. */
			vha->hw->exch_starvation++;
			if (vha->hw->exch_starvation > 5) {
5602
				ql_log(ql_log_warn, vha, 0xd03a,
5603 5604 5605 5606 5607 5608 5609 5610 5611 5612 5613 5614 5615 5616
				    "Exchange starvation-. Resetting RISC\n");

				vha->hw->exch_starvation = 0;
				if (IS_P3P_TYPE(vha->hw))
					set_bit(FCOE_CTX_RESET_NEEDED,
					    &vha->dpc_flags);
				else
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		}
		break;

5617 5618 5619
	case MBA_PORT_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
		    "qla_target(%d): Port update async event %#x "
5620
		    "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
5621 5622 5623 5624 5625
		    "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));

		login_code = le16_to_cpu(mailbox[2]);
5626
		if (login_code == 0x4) {
5627 5628
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
			    "Async MB 2: Got PLOGI Complete\n");
5629 5630
			vha->hw->exch_starvation = 0;
		} else if (login_code == 0x7)
5631 5632 5633 5634 5635 5636 5637 5638 5639 5640 5641 5642 5643
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
			    "Async MB 2: Port Logged Out\n");
		break;
	default:
		break;
	}

	tgt->irq_cmd_count--;
}

static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
	uint16_t loop_id)
{
5644
	fc_port_t *fcport, *tfcp, *del;
5645
	int rc;
5646 5647
	unsigned long flags;
	u8 newfcport = 0;
5648 5649 5650 5651 5652 5653 5654 5655 5656 5657 5658

	fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
	if (!fcport) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
		    "qla_target(%d): Allocation of tmp FC port failed",
		    vha->vp_idx);
		return NULL;
	}

	fcport->loop_id = loop_id;

5659
	rc = qla24xx_gpdb_wait(vha, fcport, 0);
5660 5661 5662 5663 5664 5665 5666 5667 5668
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
		    "qla_target(%d): Failed to retrieve fcport "
		    "information -- get_port_database() returned %x "
		    "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
		kfree(fcport);
		return NULL;
	}

5669 5670 5671 5672 5673 5674 5675 5676 5677 5678 5679 5680 5681 5682 5683 5684 5685 5686 5687 5688 5689 5690 5691 5692 5693 5694 5695 5696 5697 5698 5699 5700 5701
	del = NULL;
	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1);

	if (tfcp) {
		tfcp->d_id = fcport->d_id;
		tfcp->port_type = fcport->port_type;
		tfcp->supported_classes = fcport->supported_classes;
		tfcp->flags |= fcport->flags;

		del = fcport;
		fcport = tfcp;
	} else {
		if (vha->hw->current_topology == ISP_CFG_F)
			fcport->flags |= FCF_FABRIC_DEVICE;

		list_add_tail(&fcport->list, &vha->vp_fcports);
		if (!IS_SW_RESV_ADDR(fcport->d_id))
		   vha->fcport_count++;
		fcport->login_gen++;
		fcport->disc_state = DSC_LOGIN_COMPLETE;
		fcport->login_succ = 1;
		newfcport = 1;
	}

	fcport->deleted = 0;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	switch (vha->host->active_mode) {
	case MODE_INITIATOR:
	case MODE_DUAL:
		if (newfcport) {
			if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
5702
				ql_dbg(ql_dbg_disc, vha, 0x20fe,
5703 5704 5705 5706
				   "%s %d %8phC post upd_fcport fcp_cnt %d\n",
				   __func__, __LINE__, fcport->port_name, vha->fcport_count);
				qla24xx_post_upd_fcport_work(vha, fcport);
			} else {
5707
				ql_dbg(ql_dbg_disc, vha, 0x20ff,
5708 5709 5710 5711 5712 5713 5714 5715 5716 5717 5718 5719 5720 5721
				   "%s %d %8phC post gpsc fcp_cnt %d\n",
				   __func__, __LINE__, fcport->port_name, vha->fcport_count);
				qla24xx_post_gpsc_work(vha, fcport);
			}
		}
		break;

	case MODE_TARGET:
	default:
		break;
	}
	if (del)
		qla2x00_free_fcport(del);

5722 5723 5724 5725
	return fcport;
}

/* Must be called under tgt_mutex */
5726
static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha,
5727 5728
	uint8_t *s_id)
{
5729
	struct fc_port *sess = NULL;
5730 5731 5732 5733
	fc_port_t *fcport = NULL;
	int rc, global_resets;
	uint16_t loop_id = 0;

5734 5735 5736 5737 5738 5739 5740 5741 5742 5743 5744
	if ((s_id[0] == 0xFF) && (s_id[1] == 0xFC)) {
		/*
		 * This is Domain Controller, so it should be
		 * OK to drop SCSI commands from it.
		 */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
		    "Unable to find initiator with S_ID %x:%x:%x",
		    s_id[0], s_id[1], s_id[2]);
		return NULL;
	}

5745 5746
	mutex_lock(&vha->vha_tgt.tgt_mutex);

5747
retry:
5748 5749
	global_resets =
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
5750 5751 5752

	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
	if (rc != 0) {
5753 5754
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

5755 5756 5757 5758 5759
		ql_log(ql_log_info, vha, 0xf071,
		    "qla_target(%d): Unable to find "
		    "initiator with S_ID %x:%x:%x",
		    vha->vp_idx, s_id[0], s_id[1],
		    s_id[2]);
5760 5761 5762 5763 5764 5765 5766 5767

		if (rc == -ENOENT) {
			qlt_port_logo_t logo;
			sid_to_portid(s_id, &logo.id);
			logo.cmd_count = 1;
			qlt_send_first_logo(vha, &logo);
		}

5768 5769 5770 5771
		return NULL;
	}

	fcport = qlt_get_port_database(vha, loop_id);
5772 5773
	if (!fcport) {
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
5774
		return NULL;
5775
	}
5776 5777

	if (global_resets !=
5778
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
5779 5780 5781 5782
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
		    "qla_target(%d): global reset during session discovery "
		    "(counter was %d, new %d), retrying", vha->vp_idx,
		    global_resets,
5783 5784
		    atomic_read(&vha->vha_tgt.
			qla_tgt->tgt_global_resets_count));
5785 5786 5787 5788 5789
		goto retry;
	}

	sess = qlt_create_sess(vha, fcport, true);

5790 5791
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

5792 5793 5794 5795 5796 5797 5798 5799
	return sess;
}

static void qlt_abort_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
5800
	struct fc_port *sess = NULL;
5801
	unsigned long flags = 0, flags2 = 0;
5802 5803 5804 5805
	uint32_t be_s_id;
	uint8_t s_id[3];
	int rc;

5806
	spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
5807 5808

	if (tgt->tgt_stop)
5809
		goto out_term2;
5810 5811 5812 5813 5814 5815 5816 5817

	s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
	s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
	s_id[2] = prm->abts.fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    (unsigned char *)&be_s_id);
	if (!sess) {
5818
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
5819 5820 5821 5822

		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */

5823
		spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
5824
		if (!sess)
5825
			goto out_term2;
5826
	} else {
5827
		if (sess->deleted) {
5828
			sess = NULL;
5829
			goto out_term2;
5830 5831
		}

5832
		if (!kref_get_unless_zero(&sess->sess_kref)) {
5833
			ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c,
5834 5835 5836 5837 5838
			    "%s: kref_get fail %8phC \n",
			     __func__, sess->port_name);
			sess = NULL;
			goto out_term2;
		}
5839 5840 5841
	}

	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
5842 5843 5844
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

5845 5846 5847 5848
	if (rc != 0)
		goto out_term;
	return;

5849
out_term2:
5850 5851 5852
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
5853

5854
out_term:
5855
	spin_lock_irqsave(&ha->hardware_lock, flags);
5856
	qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
5857
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
5858 5859 5860 5861 5862 5863 5864 5865
}

static void qlt_tmr_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct atio_from_isp *a = &prm->tm_iocb2;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
5866
	struct fc_port *sess = NULL;
5867 5868 5869
	unsigned long flags;
	uint8_t *s_id = NULL; /* to hide compiler warnings */
	int rc;
5870
	u64 unpacked_lun;
5871
	int fn;
5872 5873
	void *iocb;

5874
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5875 5876

	if (tgt->tgt_stop)
5877
		goto out_term2;
5878 5879 5880 5881

	s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
5882
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5883 5884 5885 5886

		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */

5887
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5888
		if (!sess)
5889
			goto out_term2;
5890
	} else {
5891
		if (sess->deleted) {
5892
			sess = NULL;
5893
			goto out_term2;
5894 5895
		}

5896
		if (!kref_get_unless_zero(&sess->sess_kref)) {
5897
			ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020,
5898 5899 5900
			    "%s: kref_get fail %8phC\n",
			     __func__, sess->port_name);
			sess = NULL;
5901
			goto out_term2;
5902
		}
5903 5904 5905 5906
	}

	iocb = a;
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
5907 5908
	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
5909 5910

	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
5911
	ha->tgt.tgt_ops->put_sess(sess);
5912
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5913 5914 5915

	if (rc != 0)
		goto out_term;
5916 5917
	return;

5918 5919 5920 5921
out_term2:
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5922
out_term:
5923
	qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0);
5924 5925 5926 5927 5928 5929 5930 5931 5932 5933 5934 5935 5936 5937 5938 5939 5940 5941 5942 5943 5944 5945 5946 5947 5948 5949 5950 5951 5952 5953 5954 5955 5956 5957 5958 5959 5960 5961 5962 5963 5964 5965 5966 5967 5968 5969 5970 5971 5972 5973 5974
}

static void qlt_sess_work_fn(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
	struct scsi_qla_host *vha = tgt->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		struct qla_tgt_sess_work_param *prm = list_entry(
		    tgt->sess_works_list.next, typeof(*prm),
		    sess_works_list_entry);

		/*
		 * This work can be scheduled on several CPUs at time, so we
		 * must delete the entry to eliminate double processing
		 */
		list_del(&prm->sess_works_list_entry);

		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

		switch (prm->type) {
		case QLA_TGT_SESS_WORK_ABORT:
			qlt_abort_work(tgt, prm);
			break;
		case QLA_TGT_SESS_WORK_TM:
			qlt_tmr_work(tgt, prm);
			break;
		default:
			BUG_ON(1);
			break;
		}

		spin_lock_irqsave(&tgt->sess_work_lock, flags);

		kfree(prm);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
}

/* Must be called under tgt_host_action_mutex */
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
	struct qla_tgt *tgt;

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

5975 5976 5977 5978 5979 5980
	if (!IS_TGT_MODE_CAPABLE(ha)) {
		ql_log(ql_log_warn, base_vha, 0xe070,
		    "This adapter does not support target mode.\n");
		return 0;
	}

5981
	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
5982
	    "Registering target for host %ld(%p).\n", base_vha->host_no, ha);
5983

5984
	BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);
5985 5986 5987 5988 5989 5990 5991 5992 5993 5994 5995 5996 5997 5998 5999 6000 6001 6002 6003 6004

	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
	if (!tgt) {
		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
		    "Unable to allocate struct qla_tgt\n");
		return -ENOMEM;
	}

	if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
		base_vha->host->hostt->supported_mode |= MODE_TARGET;

	tgt->ha = ha;
	tgt->vha = base_vha;
	init_waitqueue_head(&tgt->waitQ);
	INIT_LIST_HEAD(&tgt->del_sess_list);
	spin_lock_init(&tgt->sess_work_lock);
	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
	INIT_LIST_HEAD(&tgt->sess_works_list);
	atomic_set(&tgt->tgt_global_resets_count, 0);

6005
	base_vha->vha_tgt.qla_tgt = tgt;
6006 6007 6008 6009 6010 6011 6012 6013 6014 6015 6016 6017 6018 6019

	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
		"qla_target(%d): using 64 Bit PCI addressing",
		base_vha->vp_idx);
	tgt->tgt_enable_64bit_addr = 1;
	/* 3 is reserved */
	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
	tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
	tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;

	mutex_lock(&qla_tgt_mutex);
	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
	mutex_unlock(&qla_tgt_mutex);

6020 6021 6022
	if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
		ha->tgt.tgt_ops->add_target(base_vha);

6023 6024 6025 6026 6027 6028
	return 0;
}

/* Must be called under tgt_host_action_mutex */
int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
{
6029
	if (!vha->vha_tgt.qla_tgt)
6030 6031
		return 0;

6032 6033 6034 6035
	if (vha->fc_vport) {
		qlt_release(vha->vha_tgt.qla_tgt);
		return 0;
	}
6036 6037 6038 6039

	/* free left over qfull cmds */
	qlt_init_term_exchange(vha);

6040
	mutex_lock(&qla_tgt_mutex);
6041
	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
6042 6043 6044 6045
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
	    vha->host_no, ha);
6046
	qlt_release(vha->vha_tgt.qla_tgt);
6047 6048 6049 6050

	return 0;
}

6051 6052 6053 6054 6055 6056 6057 6058 6059 6060 6061
void qlt_remove_target_resources(struct qla_hw_data *ha)
{
	struct scsi_qla_host *node;
	u32 key = 0;

	btree_for_each_safe32(&ha->tgt.host_map, key, node)
		btree_remove32(&ha->tgt.host_map, key);

	btree_destroy32(&ha->tgt.host_map);
}

6062 6063 6064 6065 6066 6067 6068 6069 6070 6071 6072 6073 6074 6075 6076 6077 6078 6079 6080 6081 6082 6083 6084 6085 6086 6087 6088 6089 6090
static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
	unsigned char *b)
{
	int i;

	pr_debug("qla2xxx HW vha->node_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->node_name[i]);
	pr_debug("\n");
	pr_debug("qla2xxx HW vha->port_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->port_name[i]);
	pr_debug("\n");

	pr_debug("qla2xxx passed configfs WWPN: ");
	put_unaligned_be64(wwpn, b);
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", b[i]);
	pr_debug("\n");
}

/**
 * qla_tgt_lport_register - register lport with external module
 *
 * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops
 * @wwpn: Passwd FC target WWPN
 * @callback:  lport initialization callback for tcm_qla2xxx code
 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
 */
6091 6092 6093
int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
		       u64 npiv_wwpn, u64 npiv_wwnn,
		       int (*callback)(struct scsi_qla_host *, void *, u64, u64))
6094 6095 6096 6097 6098 6099 6100 6101 6102 6103 6104 6105 6106 6107 6108 6109 6110 6111 6112 6113 6114 6115
{
	struct qla_tgt *tgt;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct Scsi_Host *host;
	unsigned long flags;
	int rc;
	u8 b[WWN_SIZE];

	mutex_lock(&qla_tgt_mutex);
	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
		vha = tgt->vha;
		ha = vha->hw;

		host = vha->host;
		if (!host)
			continue;

		if (!(host->hostt->supported_mode & MODE_TARGET))
			continue;

		spin_lock_irqsave(&ha->hardware_lock, flags);
6116
		if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
6117 6118 6119 6120 6121
			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
6122 6123 6124 6125 6126 6127
		if (tgt->tgt_stop) {
			pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
				 host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
6128 6129 6130 6131 6132 6133 6134 6135
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		if (!scsi_host_get(host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe068,
			    "Unable to scsi_host_get() for"
			    " qla2xxx scsi_host\n");
			continue;
		}
6136
		qlt_lport_dump(vha, phys_wwpn, b);
6137 6138 6139 6140 6141

		if (memcmp(vha->port_name, b, WWN_SIZE)) {
			scsi_host_put(host);
			continue;
		}
6142 6143 6144 6145
		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
		if (rc != 0)
			scsi_host_put(host);

6146
		mutex_unlock(&qla_tgt_mutex);
6147 6148 6149 6150 6151 6152 6153 6154 6155 6156 6157 6158 6159 6160 6161 6162 6163 6164 6165 6166
		return rc;
	}
	mutex_unlock(&qla_tgt_mutex);

	return -ENODEV;
}
EXPORT_SYMBOL(qlt_lport_register);

/**
 * qla_tgt_lport_deregister - Degister lport
 *
 * @vha:  Registered scsi_qla_host pointer
 */
void qlt_lport_deregister(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct Scsi_Host *sh = vha->host;
	/*
	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
	 */
6167
	vha->vha_tgt.target_lport_ptr = NULL;
6168 6169 6170 6171 6172 6173 6174 6175 6176
	ha->tgt.tgt_ops = NULL;
	/*
	 * Release the Scsi_Host reference for the underlying qla2xxx host
	 */
	scsi_host_put(sh);
}
EXPORT_SYMBOL(qlt_lport_deregister);

/* Must be called under HW lock */
6177
static void qlt_set_mode(struct scsi_qla_host *vha)
6178 6179 6180 6181 6182 6183 6184
{
	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_TARGET;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
6185 6186 6187 6188
		vha->host->active_mode = MODE_UNKNOWN;
		break;
	case QLA2XXX_INI_MODE_DUAL:
		vha->host->active_mode = MODE_DUAL;
6189 6190 6191 6192 6193 6194 6195
		break;
	default:
		break;
	}
}

/* Must be called under HW lock */
6196
static void qlt_clear_mode(struct scsi_qla_host *vha)
6197 6198 6199 6200 6201 6202 6203 6204 6205
{
	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
		vha->host->active_mode = MODE_UNKNOWN;
		break;
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
6206 6207
	case QLA2XXX_INI_MODE_DUAL:
		vha->host->active_mode = MODE_INITIATOR;
6208 6209 6210 6211 6212 6213 6214 6215 6216 6217 6218 6219 6220 6221 6222
		break;
	default:
		break;
	}
}

/*
 * qla_tgt_enable_vha - NO LOCK HELD
 *
 * host_reset, bring up w/ Target Mode Enabled
 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
6223
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6224
	unsigned long flags;
6225
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
6226
	int rspq_ent = QLA83XX_RSPQ_MSIX_ENTRY_NUMBER;
6227 6228 6229 6230 6231 6232 6233 6234 6235 6236 6237 6238 6239 6240

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe069,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stopped = 0;
	qlt_set_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

6241 6242 6243 6244
	if (vha->vp_idx) {
		qla24xx_disable_vp(vha);
		qla24xx_enable_vp(vha);
	} else {
6245
		if (ha->msix_entries) {
6246
			ql_dbg(ql_dbg_tgt, vha, 0xe081,
6247 6248 6249 6250 6251 6252 6253 6254 6255
			    "%s: host%ld : vector %d cpu %d\n",
			    __func__, vha->host_no,
			    ha->msix_entries[rspq_ent].vector,
			    ha->msix_entries[rspq_ent].cpuid);

			ha->tgt.rspq_vector_cpuid =
			    ha->msix_entries[rspq_ent].cpuid;
		}

6256 6257 6258 6259
		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
		qla2xxx_wake_dpc(base_vha);
		qla2x00_wait_for_hba_online(base_vha);
	}
6260 6261 6262 6263 6264 6265 6266 6267
}
EXPORT_SYMBOL(qlt_enable_vha);

/*
 * qla_tgt_disable_vha - NO LOCK HELD
 *
 * Disable Target Mode and reset the adapter
 */
6268
static void qlt_disable_vha(struct scsi_qla_host *vha)
6269 6270
{
	struct qla_hw_data *ha = vha->hw;
6271
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6272 6273 6274 6275 6276 6277 6278 6279 6280 6281 6282 6283 6284 6285 6286 6287 6288 6289 6290 6291 6292 6293 6294 6295 6296 6297 6298
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_clear_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_hba_online(vha);
}

/*
 * Called from qla_init.c:qla24xx_vport_create() contex to setup
 * the target mode specific struct scsi_qla_host and struct qla_hw_data
 * members.
 */
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
6299 6300 6301 6302
	vha->vha_tgt.qla_tgt = NULL;

	mutex_init(&vha->vha_tgt.tgt_mutex);
	mutex_init(&vha->vha_tgt.tgt_host_action_mutex);
6303 6304 6305 6306 6307 6308 6309 6310 6311 6312

	qlt_clear_mode(vha);

	/*
	 * NOTE: Currently the value is kept the same for <24xx and
	 * >=24xx ISPs. If it is necessary to change it,
	 * the check should be added for specific ISPs,
	 * assigning the value appropriately.
	 */
	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
6313 6314

	qlt_add_target(ha, vha);
6315 6316 6317 6318 6319 6320 6321 6322 6323
}

void
qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
{
	/*
	 * FC-4 Feature bit 0 indicates target functionality to the name server.
	 */
	if (qla_tgt_mode_enabled(vha)) {
6324
		ct_req->req.rff_id.fc4_feature = BIT_0;
6325 6326
	} else if (qla_ini_mode_enabled(vha)) {
		ct_req->req.rff_id.fc4_feature = BIT_1;
6327 6328
	} else if (qla_dual_mode_enabled(vha))
		ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
6329 6330 6331 6332 6333 6334 6335 6336 6337 6338 6339 6340 6341 6342 6343 6344 6345 6346
}

/*
 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
 * @ha: HA context
 *
 * Beginning of ATIO ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
 */
void
qlt_init_atio_q_entries(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t cnt;
	struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;

6347
	if (qla_ini_mode_enabled(vha))
6348 6349 6350 6351 6352 6353 6354 6355 6356 6357 6358 6359 6360 6361
		return;

	for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
		pkt->u.raw.signature = ATIO_PROCESSED;
		pkt++;
	}

}

/*
 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
 * @ha: SCSI driver HA context
 */
void
6362
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
6363 6364 6365 6366 6367
{
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *pkt;
	int cnt, i;

6368
	if (!ha->flags.fw_started)
6369 6370
		return;

6371 6372
	while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
	    fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
6373 6374 6375
		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		cnt = pkt->u.raw.entry_count;

6376 6377 6378 6379 6380 6381
		if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
			/*
			 * This packet is corrupted. The header + payload
			 * can not be trusted. There is no point in passing
			 * it further up.
			 */
6382
			ql_log(ql_log_warn, vha, 0xd03c,
6383 6384 6385 6386 6387 6388 6389 6390 6391 6392 6393
			    "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
			    pkt->u.isp24.fcp_hdr.s_id,
			    be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
			    le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);

			adjust_corrupted_atio(pkt);
			qlt_send_term_exchange(vha, NULL, pkt, ha_locked, 0);
		} else {
			qlt_24xx_atio_pkt_all_vps(vha,
			    (struct atio_from_isp *)pkt, ha_locked);
		}
6394 6395 6396 6397 6398 6399 6400 6401 6402 6403 6404 6405 6406 6407 6408 6409

		for (i = 0; i < cnt; i++) {
			ha->tgt.atio_ring_index++;
			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
				ha->tgt.atio_ring_index = 0;
				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
			} else
				ha->tgt.atio_ring_ptr++;

			pkt->u.raw.signature = ATIO_PROCESSED;
			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		}
		wmb();
	}

	/* Adjust ring index */
6410
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
6411
	RD_REG_DWORD_RELAXED(ISP_ATIO_Q_OUT(vha));
6412 6413 6414
}

void
6415
qlt_24xx_config_rings(struct scsi_qla_host *vha)
6416 6417
{
	struct qla_hw_data *ha = vha->hw;
6418 6419
	if (!QLA_TGT_MODE_ENABLED())
		return;
6420

6421 6422 6423 6424 6425 6426 6427 6428 6429 6430 6431 6432
	WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
	RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));

	if (IS_ATIO_MSIX_CAPABLE(ha)) {
		struct qla_msix_entry *msix = &ha->msix_entries[2];
		struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;

		icb->msix_atio = cpu_to_le16(msix->entry);
		ql_dbg(ql_dbg_init, vha, 0xf072,
		    "Registering ICB vector 0x%x for atio que.\n",
		    msix->entry);
6433 6434 6435 6436 6437 6438 6439
	}
}

void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
	struct qla_hw_data *ha = vha->hw;
6440 6441 6442

	if (!QLA_TGT_MODE_ENABLED())
		return;
6443

6444
	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
6445 6446 6447 6448 6449 6450 6451 6452 6453 6454 6455 6456
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

6457
		if (qla_tgt_mode_enabled(vha))
6458
			nv->exchange_count = cpu_to_le16(0xFFFF);
6459 6460
		else			/* dual */
			nv->exchange_count = cpu_to_le16(ql2xexchoffld);
6461 6462

		/* Enable target mode */
6463
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);
6464 6465

		/* Disable ini mode, if requested */
6466
		if (qla_tgt_mode_enabled(vha))
6467
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);
6468 6469

		/* Disable Full Login after LIP */
6470
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
6471
		/* Enable initial LIP */
6472
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
6473 6474 6475 6476 6477 6478 6479
		if (ql2xtgt_tape_enable)
			/* Enable FC Tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC Tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

6480
		/* Disable Full Login after LIP */
6481
		nv->host_p &= cpu_to_le32(~BIT_10);
6482 6483 6484 6485 6486 6487 6488

		/*
		 * clear BIT 15 explicitly as we have seen at least
		 * a couple of instances where this was set and this
		 * was causing the firmware to not be initialized.
		 */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
6489
		/* Enable target PRLI control */
6490
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);
6491 6492 6493 6494 6495 6496 6497 6498 6499 6500 6501 6502 6503 6504 6505 6506 6507 6508
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

6509
		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
6510 6511 6512 6513
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

6514
		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
6515 6516 6517 6518 6519 6520 6521 6522 6523
	}
}

void
qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_24xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

Q
Quinn Tran 已提交
6524 6525 6526
	if (!QLA_TGT_MODE_ENABLED())
		return;

6527 6528
	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
6529
		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
6530
	}
Q
Quinn Tran 已提交
6531 6532 6533 6534 6535 6536 6537 6538

	/* disable ZIO at start time. */
	if (!vha->flags.init_done) {
		uint32_t tmp;
		tmp = le32_to_cpu(icb->firmware_options_2);
		tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		icb->firmware_options_2 = cpu_to_le32(tmp);
	}
6539 6540
}

6541 6542 6543 6544 6545 6546 6547 6548
void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

6549
	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
6550 6551 6552 6553 6554 6555 6556 6557 6558 6559 6560 6561
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

6562
		if (qla_tgt_mode_enabled(vha))
6563
			nv->exchange_count = cpu_to_le16(0xFFFF);
6564 6565
		else			/* dual */
			nv->exchange_count = cpu_to_le16(ql2xexchoffld);
6566 6567

		/* Enable target mode */
6568
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);
6569 6570

		/* Disable ini mode, if requested */
6571
		if (qla_tgt_mode_enabled(vha))
6572
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);
6573
		/* Disable Full Login after LIP */
6574
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
6575
		/* Enable initial LIP */
6576
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
6577 6578 6579 6580 6581 6582 6583
		/*
		 * clear BIT 15 explicitly as we have seen at
		 * least a couple of instances where this was set
		 * and this was causing the firmware to not be
		 * initialized.
		 */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
6584 6585 6586 6587 6588 6589 6590
		if (ql2xtgt_tape_enable)
			/* Enable FC tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

6591
		/* Disable Full Login after LIP */
6592
		nv->host_p &= cpu_to_le32(~BIT_10);
6593
		/* Enable target PRLI control */
6594
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);
6595 6596 6597 6598 6599 6600 6601 6602 6603 6604 6605 6606 6607 6608 6609 6610 6611 6612
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

6613
		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
6614 6615 6616 6617
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

6618
		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
6619 6620 6621 6622 6623 6624 6625 6626 6627 6628 6629 6630 6631 6632
	}
}

void
qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_81xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
6633
		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
6634
	}
Q
Quinn Tran 已提交
6635 6636 6637 6638 6639 6640 6641 6642 6643

	/* disable ZIO at start time. */
	if (!vha->flags.init_done) {
		uint32_t tmp;
		tmp = le32_to_cpu(icb->firmware_options_2);
		tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		icb->firmware_options_2 = cpu_to_le32(tmp);
	}

6644 6645 6646 6647 6648 6649 6650 6651 6652 6653 6654
}

void
qlt_83xx_iospace_config(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	ha->msix_count += 1; /* For ATIO Q */
}

6655 6656 6657 6658 6659 6660 6661 6662 6663
int
qlt_24xx_process_response_error(struct scsi_qla_host *vha,
	struct sts_entry_24xx *pkt)
{
	switch (pkt->entry_type) {
	case ABTS_RECV_24XX:
	case ABTS_RESP_24XX:
	case CTIO_TYPE7:
	case NOTIFY_ACK_TYPE:
6664
	case CTIO_CRC2:
6665 6666 6667 6668 6669 6670 6671 6672 6673 6674
		return 1;
	default:
		return 0;
	}
}

void
qlt_modify_vp_config(struct scsi_qla_host *vha,
	struct vp_config_entry_24xx *vpmod)
{
6675 6676
	/* enable target mode.  Bit5 = 1 => disable */
	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
6677
		vpmod->options_idx1 &= ~BIT_5;
6678

6679
	/* Disable ini mode, if requested.  bit4 = 1 => disable */
6680
	if (qla_tgt_mode_enabled(vha))
6681 6682 6683 6684 6685 6686
		vpmod->options_idx1 &= ~BIT_4;
}

void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
6687 6688
	int rc;

6689 6690 6691
	if (!QLA_TGT_MODE_ENABLED())
		return;

6692
	if  (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
6693 6694 6695 6696 6697 6698 6699
		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
	} else {
		ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
	}

6700 6701
	mutex_init(&base_vha->vha_tgt.tgt_mutex);
	mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
6702 6703 6704 6705 6706

	INIT_LIST_HEAD(&base_vha->unknown_atio_list);
	INIT_DELAYED_WORK(&base_vha->unknown_atio_work,
	    qlt_unknown_atio_work_fn);

6707
	qlt_clear_mode(base_vha);
6708 6709 6710

	rc = btree_init32(&ha->tgt.host_map);
	if (rc)
6711
		ql_log(ql_log_info, base_vha, 0xd03d,
6712 6713 6714
		    "Unable to initialize ha->host_map btree\n");

	qlt_update_vp_map(base_vha, SET_VP_IDX);
6715 6716
}

6717 6718 6719 6720 6721 6722 6723 6724 6725 6726 6727 6728
irqreturn_t
qla83xx_msix_atio_q(int irq, void *dev_id)
{
	struct rsp_que *rsp;
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	ha = rsp->hw;
	vha = pci_get_drvdata(ha->pdev);

6729
	spin_lock_irqsave(&ha->tgt.atio_lock, flags);
6730

6731
	qlt_24xx_process_atio_queue(vha, 0);
6732

6733
	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
6734 6735 6736 6737

	return IRQ_HANDLED;
}

6738 6739 6740 6741 6742 6743 6744 6745 6746 6747 6748 6749 6750 6751 6752 6753 6754 6755 6756
static void
qlt_handle_abts_recv_work(struct work_struct *work)
{
	struct qla_tgt_sess_op *op = container_of(work,
		struct qla_tgt_sess_op, work);
	scsi_qla_host_t *vha = op->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	if (qla2x00_reset_active(vha) || (op->chip_reset != ha->chip_reset))
		return;

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);
	qlt_24xx_process_atio_queue(vha, 0);
	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_response_pkt_all_vps(vha, (response_t *)&op->atio);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
6757 6758

	kfree(op);
6759 6760 6761 6762 6763 6764 6765 6766 6767 6768 6769 6770 6771 6772 6773 6774 6775 6776 6777 6778 6779 6780 6781 6782 6783
}

void
qlt_handle_abts_recv(struct scsi_qla_host *vha, response_t *pkt)
{
	struct qla_tgt_sess_op *op;

	op = kzalloc(sizeof(*op), GFP_ATOMIC);

	if (!op) {
		/* do not reach for ATIO queue here.  This is best effort err
		 * recovery at this point.
		 */
		qlt_response_pkt_all_vps(vha, pkt);
		return;
	}

	memcpy(&op->atio, pkt, sizeof(*pkt));
	op->vha = vha;
	op->chip_reset = vha->hw->chip_reset;
	INIT_WORK(&op->work, qlt_handle_abts_recv_work);
	queue_work(qla_tgt_wq, &op->work);
	return;
}

6784 6785 6786 6787 6788 6789 6790 6791 6792 6793 6794 6795 6796 6797 6798 6799 6800 6801 6802 6803 6804 6805 6806 6807 6808 6809 6810 6811 6812 6813 6814 6815 6816 6817 6818 6819 6820 6821 6822
int
qlt_mem_alloc(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
	    MAX_MULTI_ID_FABRIC, GFP_KERNEL);
	if (!ha->tgt.tgt_vp_map)
		return -ENOMEM;

	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
	    &ha->tgt.atio_dma, GFP_KERNEL);
	if (!ha->tgt.atio_ring) {
		kfree(ha->tgt.tgt_vp_map);
		return -ENOMEM;
	}
	return 0;
}

void
qlt_mem_free(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.atio_ring) {
		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
		    ha->tgt.atio_dma);
	}
	kfree(ha->tgt.tgt_vp_map);
}

/* vport_slock to be held by the caller */
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
6823 6824 6825 6826
	void *slot;
	u32 key;
	int rc;

6827 6828 6829
	if (!QLA_TGT_MODE_ENABLED())
		return;

6830 6831
	key = vha->d_id.b24;

6832 6833 6834 6835 6836
	switch (cmd) {
	case SET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
6837 6838
		slot = btree_lookup32(&vha->hw->tgt.host_map, key);
		if (!slot) {
6839
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018,
6840 6841 6842 6843
			    "Save vha in host_map %p %06x\n", vha, key);
			rc = btree_insert32(&vha->hw->tgt.host_map,
				key, vha, GFP_ATOMIC);
			if (rc)
6844
				ql_log(ql_log_info, vha, 0xd03e,
6845 6846 6847 6848
				    "Unable to insert s_id into host_map: %06x\n",
				    key);
			return;
		}
6849 6850
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
		    "replace existing vha in host_map %p %06x\n", vha, key);
6851
		btree_update32(&vha->hw->tgt.host_map, key, vha);
6852 6853 6854 6855 6856
		break;
	case RESET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
6857
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
6858 6859 6860 6861 6862
		   "clear vha in host_map %p %06x\n", vha, key);
		slot = btree_lookup32(&vha->hw->tgt.host_map, key);
		if (slot)
			btree_remove32(&vha->hw->tgt.host_map, key);
		vha->d_id.b24 = 0;
6863 6864 6865 6866
		break;
	}
}

6867 6868 6869 6870 6871 6872 6873 6874 6875 6876 6877 6878 6879 6880 6881 6882 6883 6884 6885
void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
{
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->d_id.b24) {
		spin_lock_irqsave(&ha->vport_slock, flags);
		vha->d_id = id;
		qlt_update_vp_map(vha, SET_AL_PA);
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	} else if (vha->d_id.b24 != id.b24) {
		spin_lock_irqsave(&ha->vport_slock, flags);
		qlt_update_vp_map(vha, RESET_AL_PA);
		vha->d_id = id;
		qlt_update_vp_map(vha, SET_AL_PA);
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	}
}

6886 6887 6888 6889 6890 6891 6892 6893
static int __init qlt_parse_ini_mode(void)
{
	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
6894 6895
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL;
6896 6897 6898 6899 6900 6901 6902 6903 6904 6905 6906 6907 6908 6909 6910 6911 6912 6913 6914 6915 6916 6917 6918
	else
		return false;

	return true;
}

int __init qlt_init(void)
{
	int ret;

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
	    qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
6919
		ql_log(ql_log_fatal, NULL, 0xd04b,
6920
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
6921
		return -ENOMEM;
6922 6923
	}

6924
	qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep",
6925 6926
	    sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t),
	    0, NULL);
6927 6928 6929 6930 6931 6932 6933 6934

	if (!qla_tgt_plogi_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_plogi_cachep failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

6935 6936 6937 6938 6939 6940
	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
6941
		goto out_plogi_cachep;
6942 6943 6944 6945 6946 6947 6948 6949 6950 6951 6952 6953 6954 6955 6956 6957
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
6958 6959
out_plogi_cachep:
	kmem_cache_destroy(qla_tgt_plogi_cachep);
6960 6961 6962 6963 6964 6965 6966 6967 6968 6969 6970 6971
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	return ret;
}

void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
6972
	kmem_cache_destroy(qla_tgt_plogi_cachep);
6973 6974
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
}