/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_def.h"
#include "qla_target.h"

static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
		"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and then on disabling target mode "
	"enabled back; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"dual\" - initiator mode will be enabled, and target mode can be "
	"activated when ready; "
	"\"enabled\" (default) - initiator mode will always stay enabled.");

static int ql_dm_tgt_ex_pct = 0;
module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql_dm_tgt_ex_pct,
	"For Dual Mode (qlini_mode=dual), this parameter determines "
	"the percentage of exchanges/cmds FW will allocate resources "
	"for Target mode.");

int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

static int temp_sam_status = SAM_STAT_BUSY;

/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE      0   /* simple task attribute */
#define FCP_PTA_HEADQ       1   /* head of queue task attribute */
#define FCP_PTA_ORDERED     2   /* ordered task attribute */
#define FCP_PTA_ACA         4   /* auto. contingent allegiance */
#define FCP_PTA_MASK        7   /* mask for task attribute field */
#define FCP_PRI_SHIFT       3   /* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK  0x80        /* reserved bits in priority field */

/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation, at the time when
 * those functions are called:
 *
 *   - Either the context is IRQ and only the IRQ handler can modify HW data,
 *     including rings related fields,
 *
 *   - Or access to target mode variables from struct qla_tgt doesn't
 *     cross those functions' boundaries, except tgt_stop, which is
 *     additionally protected by irq_cmd_count.
 */
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt, uint8_t);
static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
static void qlt_clear_tgt_db(struct qla_tgt *tgt);
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked);
static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
	fc_port_t *fcport, bool local);
void qlt_unreg_sess(struct fc_port *sess);
static void qlt_24xx_handle_abts(struct scsi_qla_host *,
	struct abts_recv_from_24xx *);

/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
static struct kmem_cache *qla_tgt_plogi_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);

static const char *prot_op_str(u32 prot_op)
{
	switch (prot_op) {
	case TARGET_PROT_NORMAL:	return "NORMAL";
	case TARGET_PROT_DIN_INSERT:	return "DIN_INSERT";
	case TARGET_PROT_DOUT_INSERT:	return "DOUT_INSERT";
	case TARGET_PROT_DIN_STRIP:	return "DIN_STRIP";
	case TARGET_PROT_DOUT_STRIP:	return "DOUT_STRIP";
	case TARGET_PROT_DIN_PASS:	return "DIN_PASS";
	case TARGET_PROT_DOUT_PASS:	return "DOUT_PASS";
	default:			return "UNKNOWN";
	}
}

/* This API intentionally takes dest as a parameter, rather than returning
 * an int value, to avoid the caller forgetting to issue wmb() after the store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
	*dest = atomic_inc_return(&base_vha->generation_tick);
	/* memory barrier */
	wmb();
}

/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);
		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}

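/*
 * Resolve the scsi_qla_host that owns a 24-bit FC destination ID.  The
 * common case matches this vha's own d_id; otherwise the NPIV host map
 * btree is consulted, keyed on the packed D_ID (domain:area:al_pa), e.g.
 * d_id 01:02:03 packs to key 0x010203.
 */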
static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
	uint8_t *d_id)
{
	struct scsi_qla_host *host;
	uint32_t key = 0;

	if ((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) &&
	    (vha->d_id.b.al_pa == d_id[2]))
		return vha;

	key  = (uint32_t)d_id[0] << 16;
	key |= (uint32_t)d_id[1] <<  8;
	key |= (uint32_t)d_id[2];

	host = btree_lookup32(&vha->hw->tgt.host_map, key);
	if (!host)
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
		    "Unable to find host %06x\n", key);

	return host;
}

static inline
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}

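/*
 * Book-keeping for outstanding target-mode commands: num_pend_cmds is
 * protected by q_full_lock, and the high-water mark is recorded in
 * qla_stats.stat_max_pend_cmds for diagnostics.
 */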
static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);

	vha->hw->tgt.num_pend_cmds++;
	if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
		vha->qla_stats.stat_max_pend_cmds =
			vha->hw->tgt.num_pend_cmds;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}
static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	vha->hw->tgt.num_pend_cmds--;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

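/*
 * Park an ATIO whose destination is not (yet) known on the per-vha
 * unknown_atio_list and schedule delayed work to retry delivery; if the
 * target is stopping, terminate the exchange instead.
 */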
static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
	struct atio_from_isp *atio,	uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (tgt->tgt_stop) {
		ql_dbg(ql_dbg_async, vha, 0x502c,
		    "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped",
		    vha->vp_idx);
		goto out_term;
	}

	u = kzalloc(sizeof(*u), GFP_ATOMIC);
	if (u == NULL)
		goto out_term;

	u->vha = vha;
	memcpy(&u->atio, atio, sizeof(*atio));
	INIT_LIST_HEAD(&u->cmd_list);

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	schedule_delayed_work(&vha->unknown_atio_work, 1);

out:
	return;

out_term:
	qlt_send_term_exchange(vha, NULL, atio, ha_locked, 0);
	goto out;
}

static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
	uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u, *t;
	scsi_qla_host_t *host;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	uint8_t queued = 0;

	list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
		if (u->aborted) {
			ql_dbg(ql_dbg_async, vha, 0x502e,
			    "Freeing unknown %s %p, because of Abort\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha, NULL, &u->atio,
			    ha_locked, 0);
			goto abort;
		}

		host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
		if (host != NULL) {
			ql_dbg(ql_dbg_async, vha, 0x502f,
			    "Requeuing unknown ATIO_TYPE7 %p\n", u);
			qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
		} else if (tgt->tgt_stop) {
			ql_dbg(ql_dbg_async, vha, 0x503a,
			    "Freeing unknown %s %p, because tgt is being stopped\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha, NULL, &u->atio,
			    ha_locked, 0);
		} else {
			ql_dbg(ql_dbg_async, vha, 0x503d,
			    "Reschedule u %p, vha %p, host %p\n", u, vha, host);
			if (!queued) {
				queued = 1;
				schedule_delayed_work(&vha->unknown_atio_work,
				    1);
			}
			continue;
		}

abort:
		spin_lock_irqsave(&vha->cmd_list_lock, flags);
		list_del(&u->cmd_list);
		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
		kfree(u);
	}
}

void qlt_unknown_atio_work_fn(struct work_struct *work)
{
	struct scsi_qla_host *vha = container_of(to_delayed_work(work),
	    struct scsi_qla_host, unknown_atio_work);

	qlt_try_to_dequeue_unknown_atios(vha, 0);
}

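/*
 * Dispatch an incoming ATIO-queue entry to the scsi_qla_host it addresses:
 * by D_ID for ATIO_TYPE7 and by vp_index for the other entry types.
 * ha_locked tells whether the caller already holds hardware_lock.
 */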
static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
		"%s: qla_target(%d): type %x ox_id %04x\n",
		__func__, vha->vp_idx, atio->u.raw.entry_type,
		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id[0],
			    atio->u.isp24.fcp_hdr.d_id[1],
			    atio->u.isp24.fcp_hdr.d_id[2]);

			qlt_queue_unknown_atio(vha, atio, ha_locked);
			break;
		}
		if (unlikely(!list_empty(&vha->unknown_atio_list)))
			qlt_try_to_dequeue_unknown_atios(vha, ha_locked);

		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case VP_RPT_ID_IOCB_TYPE:
		qla24xx_report_id_acquisition(vha,
			(struct vp_rpt_id_entry_24xx *)atio);
		break;

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
			(struct abts_recv_from_24xx *)atio;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
			entry->vp_index);
		unsigned long flags;

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe00a,
			    "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		if (!ha_locked)
			spin_lock_irqsave(&host->hw->hardware_lock, flags);
		qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
		break;
	}

	/* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return false;
}

void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_CRC2:
		ql_dbg(ql_dbg_tgt, vha, 0xe073,
			"qla_target(%d):%s: CRC2 Response pkt\n",
			vha->vp_idx, __func__);
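		/* fall through */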
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (0xFF != entry->u.isp24.vp_index) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	default:
		qlt_response_pkt(vha, pkt);
		break;
	}

}

/*
 * All qlt_plogi_ack_t operations are protected by hardware_lock
 */
static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	struct qla_work_evt *e;
	e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.nack.fcport = fcport;
	e->u.nack.type = type;
	memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp));
	return qla2x00_post_work(vha, e);
}

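/*
 * Completion callback for a NOTIFY ACK srb: update the fcport's login
 * state according to the NACK type and, for a PRLI NACK, kick the
 * follow-on fcport update / GPSC work.
 */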
static
void qla2x00_async_nack_sp_done(void *s, int res)
{
	struct srb *sp = (struct srb *)s;
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0x20f2,
	    "Async done-%s res %x %8phC  type %d\n",
	    sp->name, res, sp->fcport->port_name, sp->type);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	sp->fcport->flags &= ~FCF_ASYNC_SENT;
	sp->fcport->chip_reset = vha->hw->chip_reset;

	switch (sp->type) {
	case SRB_NACK_PLOGI:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
		sp->fcport->logout_on_delete = 1;
		sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
		break;

	case SRB_NACK_PRLI:
		sp->fcport->fw_login_state = DSC_LS_PRLI_COMP;
		sp->fcport->deleted = 0;

		if (!sp->fcport->login_succ &&
		    !IS_SW_RESV_ADDR(sp->fcport->d_id)) {
			sp->fcport->login_succ = 1;

			vha->fcport_count++;

			if (!IS_IIDMA_CAPABLE(vha->hw) ||
			    !vha->hw->flags.gpsc_supported) {
				ql_dbg(ql_dbg_disc, vha, 0x20f3,
				    "%s %d %8phC post upd_fcport fcp_cnt %d\n",
				    __func__, __LINE__,
				    sp->fcport->port_name,
				    vha->fcport_count);

				qla24xx_post_upd_fcport_work(vha, sp->fcport);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x20f5,
				    "%s %d %8phC post gpsc fcp_cnt %d\n",
				    __func__, __LINE__,
				    sp->fcport->port_name,
				    vha->fcport_count);

				qla24xx_post_gpsc_work(vha, sp->fcport);
			}
		}
		break;

	case SRB_NACK_LOGO:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
		qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE);
		break;
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp->free(sp);
}

int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	char *c = NULL;

	fcport->flags |= FCF_ASYNC_SENT;
	switch (type) {
	case SRB_NACK_PLOGI:
		fcport->fw_login_state = DSC_LS_PLOGI_PEND;
		c = "PLOGI";
		break;
	case SRB_NACK_PRLI:
		fcport->fw_login_state = DSC_LS_PRLI_PEND;
		fcport->deleted = 0;
		c = "PRLI";
		break;
	case SRB_NACK_LOGO:
		fcport->fw_login_state = DSC_LS_LOGO_PEND;
		c = "LOGO";
		break;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	sp->type = type;
	sp->name = "nack";

	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);

	sp->u.iocb_cmd.u.nack.ntfy = ntfy;

	sp->done = qla2x00_async_nack_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x20f4,
	    "Async-%s %8phC hndl %x %s\n",
	    sp->name, fcport->port_name, sp->handle, c);

	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
	fc_port_t *t;
	unsigned long flags;

	switch (e->u.nack.type) {
	case SRB_NACK_PRLI:
		mutex_lock(&vha->vha_tgt.tgt_mutex);
		t = qlt_create_sess(vha, e->u.nack.fcport, 0);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		if (t) {
			ql_log(ql_log_info, vha, 0xd034,
			    "%s create sess success %p", __func__, t);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			/* create sess has an extra kref */
			vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		}
		break;
	}
	qla24xx_async_notify_ack(vha, e->u.nack.fcport,
	    (struct imm_ntfy_from_isp*)e->u.nack.iocb, e->u.nack.type);
}

void qla24xx_delete_sess_fn(struct work_struct *work)
{
	fc_port_t *fcport = container_of(work, struct fc_port, del_work);
	struct qla_hw_data *ha = fcport->vha->hw;
	unsigned long flags;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);

	if (fcport->se_sess) {
		ha->tgt.tgt_ops->shutdown_sess(fcport);
		ha->tgt.tgt_ops->put_sess(fcport);
	} else {
		qlt_unreg_sess(fcport);
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}

/*
 * Called from qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (fcport->disc_state == DSC_DELETE_PEND) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (!sess->se_sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	} else {
		if (fcport->fw_login_state == DSC_LS_PRLI_COMP) {
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x2107,
			    "%s: kref_get fail sess %8phC \n",
			    __func__, sess->port_name);
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
		    "qla_target(%u): %ssession for port %8phC "
		    "(loop ID %d) reappeared\n", vha->vp_idx,
		    sess->local ? "local " : "", sess->port_name, sess->loop_id);

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
		    "Reappeared sess %p\n", sess);

		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
		    fcport->loop_id,
		    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
	}

	if (sess && sess->local) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name, sess->loop_id);
		sess->local = 0;
	}
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}

/*
 * This is a zero-base ref-counting solution, since hardware_lock
 * guarantees that ref_count is not modified concurrently.
 * Upon successful return, the content of iocb is undefined.
 */
static struct qlt_plogi_ack_t *
qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
		       struct imm_ntfy_from_isp *iocb)
{
	struct qlt_plogi_ack_t *pla;

	list_for_each_entry(pla, &vha->plogi_ack_list, list) {
		if (pla->id.b24 == id->b24) {
			qlt_send_term_imm_notif(vha, &pla->iocb, 1);
			memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
			return pla;
		}
	}

	pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
	if (!pla) {
		ql_dbg(ql_dbg_async, vha, 0x5088,
		       "qla_target(%d): Allocation of plogi_ack failed\n",
		       vha->vp_idx);
		return NULL;
	}

	memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
	pla->id = *id;
	list_add_tail(&pla->list, &vha->plogi_ack_list);

	return pla;
}

void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
    struct qlt_plogi_ack_t *pla)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	port_id_t port_id;
	uint16_t loop_id;
	fc_port_t *fcport = pla->fcport;

	BUG_ON(!pla->ref_count);
	pla->ref_count--;

	if (pla->ref_count)
		return;

	ql_dbg(ql_dbg_disc, vha, 0x5089,
	    "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
	    " exch %#x ox_id %#x\n", iocb->u.isp24.port_name,
	    iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1],
	    iocb->u.isp24.port_id[0],
	    le16_to_cpu(iocb->u.isp24.nport_handle),
	    iocb->u.isp24.exchange_address, iocb->ox_id);

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area   = iocb->u.isp24.port_id[1];
	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	fcport->loop_id = loop_id;
	fcport->d_id = port_id;
	qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
	}

	list_del(&pla->list);
	kmem_cache_free(qla_tgt_plogi_cachep, pla);
}

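/*
 * Link @pla to @sess for the given link type.  A session can reference
 * two PLOGI ACKs (its own WWN and a conflicting port); the ACK itself is
 * only sent, by qlt_plogi_ack_unref(), once the last reference is gone.
 */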
void
qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
    struct fc_port *sess, enum qlt_plogi_link_t link)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	/* Inc ref_count first because link might already be pointing at pla */
	pla->ref_count++;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
		"Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
		" s_id %02x:%02x:%02x, ref=%d pla %p link %d\n",
		sess, link, sess->port_name,
		iocb->u.isp24.port_name, iocb->u.isp24.port_id[2],
		iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
		pla->ref_count, pla, link);

	if (sess->plogi_link[link])
		qlt_plogi_ack_unref(vha, sess->plogi_link[link]);

	if (link == QLT_PLOGI_LINK_SAME_WWN)
		pla->fcport = sess;

	sess->plogi_link[link] = pla;
}

typedef struct {
	/* These fields must be initialized by the caller */
	port_id_t id;
	/*
	 * number of cmds dropped while we were waiting for
	 * initiator to ack LOGO initialize to 1 if LOGO is
	 * triggered by a command, otherwise, to 0
	 */
	int cmd_count;

	/* These fields are used by callee */
	struct list_head list;
} qlt_port_logo_t;

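/*
 * A minimal usage sketch (this mirrors what qlt_free_session_done()
 * below does):
 *
 *	qlt_port_logo_t logo;
 *
 *	logo.id = sess->d_id;
 *	logo.cmd_count = 0;
 *	qlt_send_first_logo(vha, &logo);
 */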
static void
qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
{
	qlt_port_logo_t *tmp;
	int res;

	mutex_lock(&vha->vha_tgt.tgt_mutex);

	list_for_each_entry(tmp, &vha->logo_list, list) {
		if (tmp->id.b24 == logo->id.b24) {
			tmp->cmd_count += logo->cmd_count;
			mutex_unlock(&vha->vha_tgt.tgt_mutex);
			return;
		}
	}

	list_add_tail(&logo->list, &vha->logo_list);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	list_del(&logo->list);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
	    "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
	    logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
	    logo->cmd_count, res);
}

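/*
 * Deferred session teardown, run from sess->free_work: send any pending
 * LOGO/NACK, release the se_session, drop the session from the target's
 * accounting and wake up waiters once sess_count/fcport_count drain.
 */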
static void qlt_free_session_done(struct work_struct *work)
{
	struct fc_port *sess = container_of(work, struct fc_port,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	bool logout_started = false;
	struct event_arg ea;
	scsi_qla_host_t *base_vha;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
		"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
		" s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
		__func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
		sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa,
		sess->logout_on_delete, sess->keep_nport_handle,
		sess->send_els_logo);

	if (!IS_SW_RESV_ADDR(sess->d_id)) {
		if (sess->send_els_logo) {
			qlt_port_logo_t logo;

			logo.id = sess->d_id;
			logo.cmd_count = 0;
			qlt_send_first_logo(vha, &logo);
		}

		if (sess->logout_on_delete) {
			int rc;

			rc = qla2x00_post_async_logout_work(vha, sess, NULL);
			if (rc != QLA_SUCCESS)
				ql_log(ql_log_warn, vha, 0xf085,
				    "Schedule logo failed sess %p rc %d\n",
				    sess, rc);
			else
				logout_started = true;
		}
	}

	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	if (logout_started) {
		bool traced = false;

		while (!ACCESS_ONCE(sess->logout_completed)) {
			if (!traced) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
					"%s: waiting for sess %p logout\n",
					__func__, sess);
				traced = true;
			}
			msleep(100);
		}

		ql_dbg(ql_dbg_disc, vha, 0xf087,
		    "%s: sess %p logout completed\n",__func__, sess);
	}

	if (sess->logo_ack_needed) {
		sess->logo_ack_needed = 0;
		qla24xx_async_notify_ack(vha, sess,
			(struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
	}

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (sess->se_sess) {
		sess->se_sess = NULL;
		if (tgt && !IS_SW_RESV_ADDR(sess->d_id))
			tgt->sess_count--;
	}

	sess->disc_state = DSC_DELETED;
	sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
	sess->deleted = QLA_SESS_DELETED;
	sess->login_retry = vha->hw->login_retry_count;

	if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
		vha->fcport_count--;
		sess->login_succ = 0;
	}

	if (sess->chip_reset != sess->vha->hw->chip_reset)
		qla2x00_clear_loop_id(sess);

	if (sess->conflict) {
		sess->conflict->login_pause = 0;
		sess->conflict = NULL;
		if (!test_bit(UNLOADING, &vha->dpc_flags))
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
	}

	{
		struct qlt_plogi_ack_t *own =
		    sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
		struct qlt_plogi_ack_t *con =
		    sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
		struct imm_ntfy_from_isp *iocb;

		if (con) {
			iocb = &con->iocb;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
				 "se_sess %p / sess %p port %8phC is gone,"
				 " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
				 sess->se_sess, sess, sess->port_name,
				 own ? "releasing own PLOGI" : "no own PLOGI pending",
				 own ? own->ref_count : -1,
				 iocb->u.isp24.port_name, con->ref_count);
			qlt_plogi_ack_unref(vha, con);
			sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
			    "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
			    sess->se_sess, sess, sess->port_name,
			    own ? "releasing own PLOGI" :
			    "no own PLOGI pending",
			    own ? own->ref_count : -1);
		}

		if (own) {
			sess->fw_login_state = DSC_LS_PLOGI_PEND;
			qlt_plogi_ack_unref(vha, own);
			sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		}
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
	    "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
	    sess, sess->port_name, vha->fcport_count);

	if (tgt && (tgt->sess_count == 0))
		wake_up_all(&tgt->waitQ);

	if (vha->fcport_count == 0)
		wake_up_all(&vha->fcport_waitQ);

	base_vha = pci_get_drvdata(ha->pdev);
	if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags))
		return;

	if (!tgt || !tgt->tgt_stop) {
		memset(&ea, 0, sizeof(ea));
		ea.event = FCME_DELETE_DONE;
		ea.fcport = sess;
		qla2x00_fcport_event_handler(vha, &ea);
	}
}

/* ha->tgt.sess_lock supposed to be held on entry */
void qlt_unreg_sess(struct fc_port *sess)
{
	struct scsi_qla_host *vha = sess->vha;

	ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
	    "%s sess %p for deletion %8phC\n",
	    __func__, sess, sess->port_name);

	if (sess->se_sess)
		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	qla2x00_mark_device_lost(vha, sess, 1, 1);

	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	sess->disc_state = DSC_DELETE_PEND;
	sess->last_rscn_gen = sess->rscn_gen;
	sess->last_login_gen = sess->login_gen;

	INIT_WORK(&sess->free_work, qlt_free_session_done);
	schedule_work(&sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);

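/*
 * Handle a reset-type immediate notify: a global event (nport_handle
 * 0xFFFF) clears the whole session database, otherwise the addressed
 * session gets the requested task management function.
 */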
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	unsigned long flags;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
		/* Global event */
		atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	} else {
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
}

static void qla24xx_chk_fcp_state(struct fc_port *sess)
{
	if (sess->chip_reset != sess->vha->hw->chip_reset) {
		sess->logout_on_delete = 0;
		sess->logo_ack_needed = 0;
		sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
		sess->scan_state = 0;
	}
}

/* ha->tgt.sess_lock supposed to be held on entry */
void qlt_schedule_sess_for_deletion(struct fc_port *sess,
	bool immediate)
{
	struct qla_tgt *tgt = sess->tgt;

	if (sess->disc_state == DSC_DELETE_PEND)
		return;

	if (sess->disc_state == DSC_DELETED) {
		if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
			wake_up_all(&tgt->waitQ);
		if (sess->vha->fcport_count == 0)
			wake_up_all(&sess->vha->fcport_waitQ);

		if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
			!sess->plogi_link[QLT_PLOGI_LINK_CONFLICT])
			return;
	}

	sess->disc_state = DSC_DELETE_PEND;

	if (sess->deleted == QLA_SESS_DELETED)
		sess->logout_on_delete = 0;

	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	qla24xx_chk_fcp_state(sess);

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion\n", sess);

	schedule_work(&sess->del_work);
}

void qlt_schedule_sess_for_deletion_lock(struct fc_port *sess)
{
	unsigned long flags;
	struct qla_hw_data *ha = sess->vha->hw;
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	qlt_schedule_sess_for_deletion(sess, 1);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}

/* ha->tgt.sess_lock supposed to be held on entry */
static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
	struct fc_port *sess;
	scsi_qla_host_t *vha = tgt->vha;

	list_for_each_entry(sess, &vha->vp_fcports, list) {
		if (sess->se_sess)
			qlt_schedule_sess_for_deletion(sess, 1);
	}

	/* At this point tgt could be already dead */
}

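/*
 * Translate an FC s_id into the firmware loop_id by walking the list of
 * logged-in ports returned by the GET ID LIST mailbox command.
 */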
static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list;
	char *id_iter;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -EBUSY;
		goto out_free_id_list;
	}

	id_iter = (char *)gid_list;
	res = -ENOENT;
	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid = (struct gid_list_info *)id_iter;
		if ((gid->al_pa == s_id[2]) &&
		    (gid->area == s_id[1]) &&
		    (gid->domain == s_id[0])) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		id_iter += ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}

/*
 * Adds an extra ref to allow to drop hw lock after adding sess to the list.
 * Caller must put it.
 */
static struct fc_port *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (vha->vha_tgt.qla_tgt->tgt_stop)
		return NULL;

	if (fcport->se_sess) {
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f6,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}
		return fcport;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->local = local;

	/*
	 * Under normal circumstances we want to logout from firmware when
	 * session eventually ends and release corresponding nport handle.
	 * In the exception cases (e.g. when new PLOGI is waiting) corresponding
	 * code will adjust these flags as necessary.
	 */
	sess->logout_on_delete = 1;
	sess->keep_nport_handle = 0;
	sess->logout_completed = 0;

	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess) < 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015,
		    "(%d) %8phC check_initiator_node_acl failed\n",
		    vha->vp_idx, fcport->port_name);
		return NULL;
	} else {
		kref_init(&fcport->sess_kref);
		/*
		 * Take an extra reference to ->sess_kref here to handle
		 * fc_port access across ->tgt.sess_lock reacquire.
		 */
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f7,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (!IS_SW_RESV_ADDR(sess->d_id))
			vha->vha_tgt.qla_tgt->sess_count++;

		qlt_do_generation_tick(vha, &sess->generation);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p se_sess %p  to tgt %p sess_count %d\n",
	    sess, sess->se_sess, vha->vha_tgt.qla_tgt,
	    vha->vha_tgt.qla_tgt->sess_count);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ?  "local " : "", fcport->port_name,
	    fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area,
	    sess->d_id.b.al_pa, sess->conf_compl_supported ?  "" : "not ");

	return sess;
}

/*
 * max_gen - specifies maximum session generation
 * at which this deletion request is still valid
 */
void
qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt)
		return;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}
	if (!sess->se_sess) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}

	if (max_gen - sess->generation < 0) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
		    "Ignoring stale deletion request for se_sess %p / sess %p"
		    " for port %8phC, req_gen %d, sess_gen %d\n",
		    sess->se_sess, sess, sess->port_name, max_gen,
		    sess->generation);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

	sess->local = 1;
	qlt_schedule_sess_for_deletion(sess, false);
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}

static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;
	int res;
	/*
	 * We need to protect against race, when tgt is freed before or
	 * inside wake_up()
	 */
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
	    "tgt %p, sess_count=%d\n",
	    tgt, tgt->sess_count);
	res = (tgt->sess_count == 0);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	return res;
}

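/*
 * Target stop is split in two phases.  Phase 1 (below) marks the target
 * as stopping, schedules every session for deletion and waits for
 * sess_count to drain; qlt_stop_phase2() then waits for outstanding IRQ
 * commands before declaring the target fully stopped.
 */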
/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&qla_tgt_mutex);
	if (!vha->fc_vport) {
		struct Scsi_Host *sh = vha->host;
		struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
		bool npiv_vports;

		spin_lock_irqsave(sh->host_lock, flags);
		npiv_vports = (fc_host->npiv_vports_inuse);
		spin_unlock_irqrestore(sh->host_lock, flags);

		if (npiv_vports) {
			mutex_unlock(&qla_tgt_mutex);
			return -EPERM;
		}
	}
	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);

	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));

	/* Big hammer */
	if (!ha->flags.host_shutting_down &&
	    (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);

/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
	    "Waiting for %d IRQ commands to complete (tgt %p)",
	    tgt->irq_cmd_count, tgt);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	while ((tgt->irq_cmd_count != 0) || (tgt->atio_irq_cmd_count != 0)) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		udelay(2);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",
	    tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);

/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	if (!ha->flags.fw_started)
		return;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE;
	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
	    ha, abts, status);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
	} else {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct abts_resp_from_24xx_fw *entry)
{
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	/*
	 * On entry we have the firmware's response to the ABTS response we
	 * generated, so its ID fields are reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE |	CTIO_COMPLETION_HANDLE_MARK;
1738
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
1739 1740 1741 1742 1743
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	ctio->u.status1.flags = cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
					    CTIO7_FLAGS_TERMINATE);
	ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);

	qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
	    FCP_TMF_CMPL, true);
}

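/*
 * Flag a command matching the aborted exchange address as aborted.  A
 * rough summary, inferred from the list names: qla_sess_op_cmd_list
 * holds ATIOs still waiting for session setup, unknown_atio_list holds
 * ATIOs that could not be matched yet, and qla_cmd_list holds commands
 * already passed to the core.  Returns 1 if a match was found.
 */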
static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;
	unsigned long flags;

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
		if (tag == op->atio.u.isp24.exchange_addr) {
			op->aborted = true;
			spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
			return 1;
		}
	}

	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
		if (tag == op->atio.u.isp24.exchange_addr) {
			op->aborted = true;
			spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
			return 1;
		}
	}

	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		if (tag == cmd->atio.u.isp24.exchange_addr) {
			cmd->aborted = 1;
			spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
			return 1;
		}
	}
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	return 0;
}

/* drop cmds for the given lun
 * XXX only looks for cmds on the port through which the lun reset was
 *     received
 * XXX does not go through the lists of other ports (which may have cmds
 *     for the same lun)
 */
static void abort_cmds_for_lun(struct scsi_qla_host *vha,
			        u64 lun, uint8_t *s_id)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;
	uint32_t key;
	unsigned long flags;

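	/* sid_to_key() packs the 3-byte S_ID into a 24-bit comparison key */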
	key = sid_to_key(s_id);
	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
		uint32_t op_key;
		u64 op_lun;

		op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
		op_lun = scsilun_to_int(
			(struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
		if (op_key == key && op_lun == lun)
			op->aborted = true;
	}

	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
		uint32_t op_key;
		u64 op_lun;

		op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
		op_lun = scsilun_to_int(
			(struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
		if (op_key == key && op_lun == lun)
			op->aborted = true;
	}

	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		uint32_t cmd_key;
		u64 cmd_lun;

		cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
		cmd_lun = scsilun_to_int(
			(struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
		if (cmd_key == key && cmd_lun == lun)
			cmd->aborted = 1;
	}
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
}

/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct fc_port *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_session *se_sess = sess->se_sess;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct qla_tgt_cmd *cmd;
	struct se_cmd *se_cmd;
	int rc;
	bool found_lun = false;
	unsigned long flags;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
		if (se_cmd->tag == abts->exchange_addr_to_abort) {
			found_lun = true;
			break;
		}
	}
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

	/* cmd not in LIO lists, look in qla list */
	if (!found_lun) {
		if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
			/* send TASK_ABORT response immediately */
			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_CMPL, false);
			return 0;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081,
			    "unable to find cmd in driver or LIO for tag 0x%x\n",
			    abts->exchange_addr_to_abort);
			return -ENOENT;
		}
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
	mcmd->reset_count = vha->hw->chip_reset;
	mcmd->tmr_func = QLA_TGT_ABTS;

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, cmd->unpacked_lun, mcmd->tmr_func,
	    abts->exchange_addr_to_abort);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
		    "qla_target(%d):  tgt_ops->handle_tmr()"
		    " failed: %d", vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	uint32_t tag = abts->exchange_addr_to_abort;
	uint8_t s_id[3];
	int rc;
	unsigned long flags;

	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
	    abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	s_id[0] = abts->fcp_hdr_le.s_id[2];
	s_id[1] = abts->fcp_hdr_le.s_id[1];
	s_id[2] = abts->fcp_hdr_le.s_id[0];

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existant session\n",
		    vha->vp_idx);
		rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));

		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		if (rc != 0) {
			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
			    false);
		}
		return;
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	if (sess->deleted) {
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
	struct ctio7_to_24xx *ctio;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt, ha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
	    ha, atio, resp_code);

	/* Send marker if required */
	if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", ha->vp_idx, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = mcmd->sess->loop_id;
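	/*
	 * Status mode 1 returns an FCP_RSP with 8 bytes of response info;
	 * the firmware appears to take those bytes from the start of the
	 * sense_data area, so the response code lands in sense_data[0].
	 */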
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = ha->vp_idx;
	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) |
		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
	ctio->u.status1.flags = cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(temp);
	ctio->u.status1.scsi_status =
	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
	ctio->u.status1.response_len = cpu_to_le16(8);
	ctio->u.status1.sense_data[0] = resp_code;

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(ha, ha->req);
}

void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire
 */
void qlt_send_resp_ctio(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
    uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
{
	struct atio_from_isp *atio = &cmd->atio;
	struct ctio7_to_24xx *ctio;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
	    "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
	    "sense_key=%02x, asc=%02x, ascq=%02x",
	    vha, atio, scsi_status, sense_key, asc, ascq);

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (!ctio) {
		ql_dbg(ql_dbg_async, vha, 0x3067,
		    "qla2x00t(%ld): %s failed: unable to allocate request packet",
		    vha->host_no, __func__);
		goto out;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE;
	ctio->nport_handle = cmd->sess->loop_id;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) |
	    CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
	ctio->u.status1.flags = cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(temp);
	ctio->u.status1.scsi_status =
	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status);
	ctio->u.status1.response_len = cpu_to_le16(18);
	ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));

	if (ctio->u.status1.residual != 0)
		ctio->u.status1.scsi_status |=
		    cpu_to_le16(SS_RESIDUAL_UNDER);

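	/*
	 * Build fixed-format sense data as three 32-bit words at byte
	 * offsets 0, 4 and 12 of the sense area.  Each dword is composed
	 * in big-endian order but stored little-endian; the hardware
	 * appears to byte-swap each dword on the way out, yielding 0x70
	 * at byte 0, the sense key at byte 2, the additional length
	 * (0x0a) at byte 7 and ASC/ASCQ at bytes 12/13.
	 */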
	/* Response code and sense key */
	put_unaligned_le32(((0x70 << 24) | (sense_key << 8)),
	    ctio->u.status1.sense_data);
	/* Additional sense length */
	put_unaligned_le32(0x0a, ctio->u.status1.sense_data + 4);
	/* ASC and ASCQ */
	put_unaligned_le32(((asc << 24) | (ascq << 16)),
	    ctio->u.status1.sense_data + 12);

	/* Memory Barrier */
	wmb();

	qla2x00_start_iocbs(vha, vha->req);
out:
	return;
}

/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (!vha->flags.online || mcmd->reset_count != ha->chip_reset) {
		/*
		 * Either the port is not online or this request was from a
		 * previous life; just abort the processing.
		 */
		ql_dbg(ql_dbg_async, vha, 0xe100,
			"RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
			vha->flags.online, qla2x00_reset_active(vha),
			mcmd->reset_count, ha->chip_reset);
		ha->tgt.tgt_ops->free_mcmd(mcmd);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	}

	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
		if (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
		    ELS_LOGO ||
		    mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
		    ELS_PRLO ||
		    mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
		    ELS_TPRLO) {
			ql_dbg(ql_dbg_disc, vha, 0x2106,
			    "TM response logo %phC status %#x state %#x",
			    mcmd->sess->port_name, mcmd->fc_tm_rsp,
			    mcmd->flags);
			qlt_schedule_sess_for_deletion_lock(mcmd->sess);
		} else {
			qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
				0, 0, 0, 0, 0, 0);
		}
	} else {
		if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
			qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
			    mcmd->fc_tm_rsp, false);
		else
			qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1.  The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here.
	 */
	ha->tgt.tgt_ops->free_mcmd(mcmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);

/* No locks */
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
		/*
		 * If greater than four sg entries then we need to allocate
		 * the continuation entries
		 */
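		/*
		 * Worked example (hypothetical numbers): with 10 mapped
		 * segments, datasegs_per_cmd = 3 and datasegs_per_cont = 5,
		 * the 7 segments that do not fit into the command IOCB need
		 * DIV_ROUND_UP(7, 5) = 2 continuation IOCBs.
		 */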
		if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
			prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
			prm->tgt->datasegs_per_cmd,
			prm->tgt->datasegs_per_cont);
	} else {
		/* DIF */
		if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
		    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
			prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
			prm->tot_dsds = prm->seg_cnt;
		} else
			prm->tot_dsds = prm->seg_cnt;

		if (cmd->prot_sg_cnt) {
			prm->prot_sg      = cmd->prot_sg;
			prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev,
				cmd->prot_sg, cmd->prot_sg_cnt,
				cmd->dma_data_direction);
			if (unlikely(prm->prot_seg_cnt == 0))
				goto out_err;

			if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
			    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
				/* DIF bundling is not supported here */
				prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
								cmd->blk_sz);
				prm->tot_dsds += prm->prot_seg_cnt;
			} else
				prm->tot_dsds += prm->prot_seg_cnt;
		}
	}

	return 0;

out_err:
	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}

static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha = vha->hw;

	if (!cmd->sg_mapped)
		return;

	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
	cmd->sg_mapped = 0;

	if (cmd->prot_sg_cnt)
		pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
			cmd->dma_data_direction);

	if (!cmd->ctx)
		return;

	if (cmd->ctx_dsd_alloced)
		qla2x00_clean_dsd_pool(ha, cmd->ctx);

	dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
}

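/*
 * Reserve req_cnt entries on the request ring, keeping a cushion of two
 * free entries.  When the cached free count looks too small, re-read the
 * hardware out-pointer and recompute the free space with ring arithmetic.
 * Example (hypothetical values): length = 2048, ring_index = 100,
 * out = 40 -> free = 2048 - (100 - 40) = 1988 entries.
 */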
static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
	uint32_t req_cnt)
{
	uint32_t cnt, cnt_in;

	if (vha->req->cnt < (req_cnt + 2)) {
		cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out);
		cnt_in = (uint16_t)RD_REG_DWORD(vha->req->req_q_in);

		if (vha->req->ring_index < cnt)
			vha->req->cnt = cnt - vha->req->ring_index;
		else
			vha->req->cnt = vha->req->length -
			    (vha->req->ring_index - cnt);

		if (unlikely(vha->req->cnt < (req_cnt + 2))) {
			ql_dbg(ql_dbg_io, vha, 0x305a,
			    "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n",
			    vha->vp_idx, vha->req->ring_index,
			    vha->req->cnt, req_cnt, cnt, cnt_in,
			    vha->req->length);
			return -EAGAIN;
		}
	}

	vha->req->cnt -= req_cnt;

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
{
	/* Adjust ring index. */
	vha->req->ring_index++;
	if (vha->req->ring_index == vha->req->length) {
		vha->req->ring_index = 0;
		vha->req->ring_ptr = vha->req->ring;
	} else {
		vha->req->ring_ptr++;
	}
	return (cont_entry_t *)vha->req->ring_ptr;
}

/* ha->hardware_lock supposed to be held on entry */
static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t h;

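	/*
	 * Handles are 1-based cookies into ha->tgt.cmds[]: 0 is
	 * QLA_TGT_NULL_HANDLE and QLA_TGT_SKIP_HANDLE is reserved, so scan
	 * for the next free slot and give up after one full lap.
	 */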
	h = ha->tgt.current_handle;
	/* always increment cmd handle */
	do {
		++h;
		if (h > DEFAULT_OUTSTANDING_COMMANDS)
			h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
		if (h == ha->tgt.current_handle) {
			ql_dbg(ql_dbg_io, vha, 0x305b,
			    "qla_target(%d): Ran out of "
			    "empty cmd slots in ha %p\n", vha->vp_idx, ha);
			h = QLA_TGT_NULL_HANDLE;
			break;
		}
	} while ((h == QLA_TGT_NULL_HANDLE) ||
	    (h == QLA_TGT_SKIP_HANDLE) ||
	    (ha->tgt.cmds[h-1] != NULL));

	if (h != QLA_TGT_NULL_HANDLE)
		ha->tgt.current_handle = h;

	return h;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *atio = &prm->cmd->atio;
	uint16_t temp;

	pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = vha->vp_idx;

	h = qlt_make_handle(vha);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, therefore, the command.
		 */
		return -EAGAIN;
	} else
		ha->tgt.cmds[h - 1] = prm->cmd;

	pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = prm->cmd->loop_id;
	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
	temp = atio->u.isp24.attr << 9;
	pkt->u.status0.flags |= cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.ox_id = cpu_to_le16(temp);
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;

	/* Build continuation packets */
	while (prm->seg_cnt > 0) {
		cont_a64_entry_t *cont_pkt64 =
			(cont_a64_entry_t *)qlt_get_req_pkt(vha);

		/*
		 * Make sure that from cont_pkt64 none of
		 * 64-bit specific fields used for 32-bit
		 * addressing. Cast to (cont_entry_t *) for
		 * that.
		 */

		memset(cont_pkt64, 0, sizeof(*cont_pkt64));

		cont_pkt64->entry_count = 1;
		cont_pkt64->sys_define = 0;

		if (enable_64bit_addressing) {
			cont_pkt64->entry_type = CONTINUE_A64_TYPE;
			dword_ptr =
			    (uint32_t *)&cont_pkt64->dseg_0_address;
		} else {
			cont_pkt64->entry_type = CONTINUE_TYPE;
			dword_ptr =
			    (uint32_t *)&((cont_entry_t *)
				cont_pkt64)->dseg_0_address;
		}

		/* Load continuation entry data segments */
		for (cnt = 0;
		    cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
		    cnt++, prm->seg_cnt--) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_lo32
				(sg_dma_address(prm->sg)));
			if (enable_64bit_addressing) {
				*dword_ptr++ =
				    cpu_to_le32(pci_dma_hi32
					(sg_dma_address
					(prm->sg)));
			}
			*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

			prm->sg = sg_next(prm->sg);
		}
	}
}

/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
static void qlt_load_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

	/* Setup packet address segment pointer */
	dword_ptr = pkt24->u.status0.dseg_0_address;

	/* Set total data segment count */
	if (prm->seg_cnt)
		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

	if (prm->seg_cnt == 0) {
		/* No data transfer */
		*dword_ptr++ = 0;
		*dword_ptr = 0;
		return;
	}

	/* If scatter gather */

	/* Load command entry data segments */
	for (cnt = 0;
	    (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
	    cnt++, prm->seg_cnt--) {
		*dword_ptr++ =
		    cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
		if (enable_64bit_addressing) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_hi32(
				sg_dma_address(prm->sg)));
		}
		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

		prm->sg = sg_next(prm->sg);
	}

	qlt_load_cont_data_segments(prm, vha);
}

static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
{
	return cmd->bufflen > 0;
}

static void qlt_print_dif_err(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd;
	struct scsi_qla_host *vha;

	/* asc 0x10=dif error */
	if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) {
		cmd = prm->cmd;
		vha = cmd->vha;
		/* ASCQ */
		switch (prm->sense_buffer[13]) {
		case 1:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b,
			    "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		case 2:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c,
			    "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		case 3:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f,
			    "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		default:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe010,
			    "BE detected Dif ERR: lba[%llx|%lld] len[%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		}
		ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16);
	}
}

/*
 * Called without ha->hardware_lock held
 */
static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
	uint32_t *full_req_cnt)
{
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd = &cmd->se_cmd;

	prm->cmd = cmd;
	prm->tgt = tgt;
	prm->rq_result = scsi_status;
	prm->sense_buffer = &cmd->sense_buffer[0];
	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
	prm->sg = NULL;
	prm->seg_cnt = -1;
	prm->req_cnt = 1;
	prm->add_status_pkt = 0;

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EFAULT;

	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
		if  (qlt_pci_map_calc_cnt(prm) != 0)
			return -EAGAIN;
	}

	*full_req_cnt = prm->req_cnt;

	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c,
		    "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
		       prm->residual, se_cmd->tag,
		       se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		       cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_UNDER;
	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_io, vha, 0x305d,
		    "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
		       prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
		       se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_OVER;
	}

	if (xmit_type & QLA_TGT_XMIT_STATUS) {
		/*
		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
		 * ignored in *xmit_response() below
		 */
		if (qlt_has_data(cmd)) {
			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
			    (IS_FWI2_CAPABLE(ha) &&
			    (prm->rq_result != 0))) {
				prm->add_status_pkt = 1;
				(*full_req_cnt)++;
			}
		}
	}

	return 0;
}

static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
	struct qla_tgt_cmd *cmd, int sending_sense)
{
	if (ha->tgt.enable_class_2)
		return 0;

	if (sending_sense)
		return cmd->conf_compl_supported;
	else
		return ha->tgt.enable_explicit_conf &&
		    cmd->conf_compl_supported;
}

static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
	struct qla_tgt_prm *prm)
{
	prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
	    (uint32_t)sizeof(ctio->u.status1.sense_data));
	ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
	if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
		ctio->u.status0.flags |= cpu_to_le16(
		    CTIO7_FLAGS_EXPLICIT_CONFORM |
		    CTIO7_FLAGS_CONFORM_REQ);
	}
	ctio->u.status0.residual = cpu_to_le32(prm->residual);
	ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
	if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
		int i;

		if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
			if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) {
				ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
				    "Skipping EXPLICIT_CONFORM and "
				    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
				    "non GOOD status\n");
				goto skip_explicit_conf;
			}
			ctio->u.status1.flags |= cpu_to_le16(
			    CTIO7_FLAGS_EXPLICIT_CONFORM |
			    CTIO7_FLAGS_CONFORM_REQ);
		}
skip_explicit_conf:
		ctio->u.status1.flags &=
		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.scsi_status |=
		    cpu_to_le16(SS_SENSE_LEN_VALID);
		ctio->u.status1.sense_length =
		    cpu_to_le16(prm->sense_buffer_len);
		for (i = 0; i < prm->sense_buffer_len/4; i++)
			((uint32_t *)ctio->u.status1.sense_data)[i] =
				cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);

		qlt_print_dif_err(prm);

	} else {
		ctio->u.status1.flags &=
		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.sense_length = 0;
		memset(ctio->u.status1.sense_data, 0,
		    sizeof(ctio->u.status1.sense_data));
	}

	/* Sense with len > 24, is it possible ??? */
}

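/*
 * Decide whether the HBA should check T10-PI tags for this command.  A
 * sketch of the ql2xenablehba_err_chk policy as used below: DOUT_INSERT
 * and DIN_STRIP need a setting >= 1, the PASS modes need >= 2, and
 * DIN_INSERT/DOUT_STRIP are always checked because the HBA generates
 * the tags itself.
 */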
static inline int
qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		if (ql2xenablehba_err_chk >= 1)
			return 1;
		break;
	case TARGET_PROT_DOUT_PASS:
	case TARGET_PROT_DIN_PASS:
		if (ql2xenablehba_err_chk >= 2)
			return 1;
		break;
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		return 1;
	default:
		break;
	}
	return 0;
}

static inline int
qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_STRIP:
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
	    return 1;
	default:
	    return 0;
	}
	return 0;
}

/*
 * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
 */
static void
qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
    uint16_t *pfw_prot_opts)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
	scsi_qla_host_t *vha = cmd->tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	uint32_t t32 = 0;
	/*
	 * Wait until MODE SENSE/SELECT (mode page 0Ah, subpage 2) support
	 * has been implemented by TCM before the app tag is available.
	 * Look for modesense_handlers[].
	 */
	ctx->app_tag = 0;
	ctx->app_tag_mask[0] = 0x0;
	ctx->app_tag_mask[1] = 0x0;

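	/*
	 * On PI "uninit-capable" HBAs, ask the firmware not to validate
	 * blocks carrying the escape values that mark unwritten blocks
	 * (a sketch of the intent behind the PO_DIS_VALD_APP_*_ESC bits).
	 */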
	if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
			*pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
			*pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
	}

	t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);

	switch (se_cmd->prot_type) {
	case TARGET_DIF_TYPE0_PROT:
		/*
		 * No check for ql2xenablehba_err_chk, as it
		 * would be an I/O error if hba tag generation
		 * is not done.
		 */
		ctx->ref_tag = cpu_to_le32(lba);
		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	case TARGET_DIF_TYPE1_PROT:
	    /*
	     * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
	     * REF tag, and 16 bit app tag.
	     */
	    ctx->ref_tag = cpu_to_le32(lba);
	    if (!qla_tgt_ref_mask_check(se_cmd) ||
		!(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
		    *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
		    break;
	    }
	    /* enable ALL bytes of the ref tag */
	    ctx->ref_tag_mask[0] = 0xff;
	    ctx->ref_tag_mask[1] = 0xff;
	    ctx->ref_tag_mask[2] = 0xff;
	    ctx->ref_tag_mask[3] = 0xff;
	    break;
	case TARGET_DIF_TYPE2_PROT:
	    /*
	     * For TYPE 2 protection: 16 bit GUARD + 32 bit REF
	     * tag has to match LBA in CDB + N
	     */
	    ctx->ref_tag = cpu_to_le32(lba);
	    if (!qla_tgt_ref_mask_check(se_cmd) ||
		!(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
		    *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
		    break;
	    }
	    /* enable ALL bytes of the ref tag */
	    ctx->ref_tag_mask[0] = 0xff;
	    ctx->ref_tag_mask[1] = 0xff;
	    ctx->ref_tag_mask[2] = 0xff;
	    ctx->ref_tag_mask[3] = 0xff;
	    break;
	case TARGET_DIF_TYPE3_PROT:
	    /* For TYPE 3 protection: 16 bit GUARD only */
	    *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
	    ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
		ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
	    break;
	}
}

static inline int
qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
{
	uint32_t		*cur_dsd;
	uint32_t		transfer_length = 0;
	uint32_t		data_bytes;
	uint32_t		dif_bytes;
	uint8_t			bundling = 1;
	uint8_t			*clr_ptr;
	struct crc_context	*crc_ctx_pkt = NULL;
	struct qla_hw_data	*ha;
	struct ctio_crc2_to_fw	*pkt;
	dma_addr_t		crc_ctx_dma;
	uint16_t		fw_prot_opts = 0;
	struct qla_tgt_cmd	*cmd = prm->cmd;
	struct se_cmd		*se_cmd = &cmd->se_cmd;
	uint32_t h;
	struct atio_from_isp *atio = &prm->cmd->atio;
	struct qla_tc_param	tc;
	uint16_t t16;

	ha = vha->hw;

	pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	ql_dbg(ql_dbg_tgt, vha, 0xe071,
		"qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
		vha->vp_idx, __func__, se_cmd, se_cmd->prot_op,
		prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);

	if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
	    (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
		bundling = 0;

	/* Compute dif len and adjust data len to include protection */
	data_bytes = cmd->bufflen;
	dif_bytes  = (data_bytes / cmd->blk_sz) * 8;

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		transfer_length = data_bytes;
		if (cmd->prot_sg_cnt)
			data_bytes += dif_bytes;
		break;
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		transfer_length = data_bytes + dif_bytes;
		break;
	default:
		BUG();
		break;
	}

	if (!qlt_hba_err_chk_enabled(se_cmd))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
			fw_prot_opts |= PO_DIS_VALD_APP_ESC;
		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
			fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
	}

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_INSERT:
		fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_STRIP:
		fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		fw_prot_opts |= PO_MODE_DIF_PASS;
		/* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
		break;
	default:/* Normal Request */
		fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	/* ---- PKT ---- */
	/* Update entry type to indicate Command Type CRC_2 IOCB */
	pkt->entry_type  = CTIO_CRC2;
	pkt->entry_count = 1;
	pkt->vp_index = vha->vp_idx;

	h = qlt_make_handle(vha);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, therefore, the command.
		 */
		return -EAGAIN;
	} else
		ha->tgt.cmds[h-1] = prm->cmd;

	pkt->handle  = h | CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr   = atio->u.isp24.exchange_addr;

	/* silence compile warning */
	t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->ox_id  = cpu_to_le16(t16);

	t16 = (atio->u.isp24.attr << 9);
	pkt->flags |= cpu_to_le16(t16);
	pkt->relative_offset = cpu_to_le32(prm->cmd->offset);

	/* Set transfer direction */
	if (cmd->dma_data_direction == DMA_TO_DEVICE)
		pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DATA_IN);
	else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
		pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT);

	pkt->dseg_count = prm->tot_dsds;
	/* Fibre channel byte count */
	pkt->transfer_length = cpu_to_le32(transfer_length);

	/* ----- CRC context -------- */

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = cmd->ctx =
	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	/* Set handle */
	crc_ctx_pkt->handle = pkt->handle;

	qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);

	pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure bundling when protection data must be fetched
		 * with interleaving PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count =
			cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size   = cpu_to_le16(cmd->blk_sz);
	crc_ctx_pkt->prot_opts  = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);

	memset((uint8_t *)&tc, 0 , sizeof(tc));
	tc.vha = vha;
	tc.blk_sz = cmd->blk_sz;
	tc.bufflen = cmd->bufflen;
	tc.sg = cmd->sg;
	tc.prot_sg = cmd->prot_sg;
	tc.ctx = crc_ctx_pkt;
	tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;

	/* Walks data segments */
	pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);

	if (!bundling && prm->prot_seg_cnt) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
			prm->tot_dsds, &tc))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
		(prm->tot_dsds - prm->prot_seg_cnt), &tc))
		goto crc_queuing_error;

	if (bundling && prm->prot_seg_cnt) {
		/* Walks dif segments */
		pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;

		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
			prm->prot_seg_cnt, &tc))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */
	vha->hw->tgt.cmds[h - 1] = NULL;

	return QLA_FUNCTION_FAILED;
}

/*
 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
 */
int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
	uint8_t scsi_status)
{
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct ctio7_to_24xx *pkt;
	struct qla_tgt_prm prm;
	uint32_t full_req_cnt = 0;
	unsigned long flags = 0;
	int res;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (cmd->sess && cmd->sess->deleted) {
		cmd->state = QLA_TGT_STATE_PROCESSED;
		if (cmd->sess->logout_completed)
			/* no need to terminate. FW already freed exchange. */
			qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
		else
			qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	memset(&prm, 0, sizeof(prm));

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n",
	    (xmit_type & QLA_TGT_XMIT_STATUS) ?
	    1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
	    &cmd->se_cmd);

	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
	    &full_req_cnt);
	if (unlikely(res != 0)) {
		return res;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (xmit_type == QLA_TGT_XMIT_STATUS)
		vha->tgt_counters.core_qla_snd_status++;
	else
		vha->tgt_counters.core_qla_que_buf++;

	if (!ha->flags.fw_started || cmd->reset_count != ha->chip_reset) {
		/*
		 * Either the port is not online or this request was from a
		 * previous life; just abort the processing.
		 */
		cmd->state = QLA_TGT_STATE_PROCESSED;
		qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
		ql_dbg(ql_dbg_async, vha, 0xe101,
			"RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
			vha->flags.online, qla2x00_reset_active(vha),
			cmd->reset_count, ha->chip_reset);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return 0;
	}

	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(vha, full_req_cnt);
	if (unlikely(res))
		goto out_unmap_unlock;

	if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
		res = qlt_build_ctio_crc2_pkt(&prm, vha);
	else
		res = qlt_24xx_build_ctio_pkt(&prm, vha);
	if (unlikely(res != 0)) {
		vha->req->cnt += full_req_cnt;
		goto out_unmap_unlock;
	}

	pkt = (struct ctio7_to_24xx *)prm.pkt;

	if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
		pkt->u.status0.flags |=
		    cpu_to_le16(CTIO7_FLAGS_DATA_IN |
			CTIO7_FLAGS_STATUS_MODE_0);

		if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
			qlt_load_data_segments(&prm, vha);

		if (prm.add_status_pkt == 0) {
			if (xmit_type & QLA_TGT_XMIT_STATUS) {
				pkt->u.status0.scsi_status =
				    cpu_to_le16(prm.rq_result);
				pkt->u.status0.residual =
				    cpu_to_le32(prm.residual);
				pkt->u.status0.flags |= cpu_to_le16(
				    CTIO7_FLAGS_SEND_STATUS);
				if (qlt_need_explicit_conf(ha, cmd, 0)) {
					pkt->u.status0.flags |=
					    cpu_to_le16(
						CTIO7_FLAGS_EXPLICIT_CONFORM |
						CTIO7_FLAGS_CONFORM_REQ);
				}
			}

		} else {
			/*
			 * We have already made sure that there is sufficient
			 * amount of request entries to not drop HW lock in
			 * req_pkt().
			 */
			struct ctio7_to_24xx *ctio =
				(struct ctio7_to_24xx *)qlt_get_req_pkt(vha);

			ql_dbg(ql_dbg_io, vha, 0x305e,
			    "Building additional status packet 0x%p.\n",
			    ctio);

			/*
			 * T10-DIF: ctio_crc2_to_fw overlays on top of
			 * ctio7_to_24xx
			 */
			memcpy(ctio, pkt, sizeof(*ctio));
			/* reset back to CTIO7 */
			ctio->entry_count = 1;
			ctio->entry_type = CTIO_TYPE7;
			ctio->dseg_count = 0;
			ctio->u.status1.flags &= ~cpu_to_le16(
			    CTIO7_FLAGS_DATA_IN);

			/* Real finish is ctio_m1's finish */
			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
			pkt->u.status0.flags |= cpu_to_le16(
			    CTIO7_FLAGS_DONT_RET_CTIO);

			/* qlt_24xx_init_ctio_to_isp will correct
			 * all necessary fields that are part of CTIO7.
			 * There should be no residual CTIO-CRC2 data.
			 */
			qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
			    &prm);
			pr_debug("Status CTIO7: %p\n", ctio);
		}
	} else
		qlt_24xx_init_ctio_to_isp(pkt, &prm);


	cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
	cmd->cmd_sent_to_fw = 1;

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return 0;

out_unmap_unlock:
	qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
EXPORT_SYMBOL(qlt_xmit_response);

int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
{
	struct ctio7_to_24xx *pkt;
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = cmd->tgt;
	struct qla_tgt_prm prm;
	unsigned long flags;
	int res = 0;

	memset(&prm, 0, sizeof(prm));
	prm.cmd = cmd;
	prm.tgt = tgt;
	prm.sg = NULL;
	prm.req_cnt = 1;

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EIO;

	/* Calculate number of entries and segments required */
	if (qlt_pci_map_calc_cnt(&prm) != 0)
		return -EAGAIN;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (!ha->flags.fw_started || (cmd->reset_count != ha->chip_reset) ||
	    (cmd->sess && cmd->sess->deleted)) {
		/*
		 * Either the port is not online or this request was from a
		 * previous life; just abort the processing.
		 */
		cmd->state = QLA_TGT_STATE_NEED_DATA;
		qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
		ql_dbg(ql_dbg_async, vha, 0xe102,
			"RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
			vha->flags.online, qla2x00_reset_active(vha),
			cmd->reset_count, ha->chip_reset);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return 0;
	}

	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(vha, prm.req_cnt);
	if (res != 0)
		goto out_unlock_free_unmap;
	if (cmd->se_cmd.prot_op)
		res = qlt_build_ctio_crc2_pkt(&prm, vha);
	else
		res = qlt_24xx_build_ctio_pkt(&prm, vha);

	if (unlikely(res != 0)) {
		vha->req->cnt += prm.req_cnt;
		goto out_unlock_free_unmap;
	}

	pkt = (struct ctio7_to_24xx *)prm.pkt;
	pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
	    CTIO7_FLAGS_STATUS_MODE_0);

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
		qlt_load_data_segments(&prm, vha);

	cmd->state = QLA_TGT_STATE_NEED_DATA;
	cmd->cmd_sent_to_fw = 1;

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;

out_unlock_free_unmap:
	qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
EXPORT_SYMBOL(qlt_rdy_to_xfer);


/*
 * It is assumed that either the hardware_lock or the qpair lock is held.
 */
static void
qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
	struct ctio_crc_from_fw *sts)
{
	uint8_t		*ap = &sts->actual_dif[0];
	uint8_t		*ep = &sts->expected_dif[0];
	uint64_t	lba = cmd->se_cmd.t_task_lba;
	uint8_t scsi_status, sense_key, asc, ascq;
	unsigned long flags;

	cmd->trc_flags |= TRC_DIF_ERR;

	cmd->a_guard   = be16_to_cpu(*(uint16_t *)(ap + 0));
	cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
	cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));

	cmd->e_guard   = be16_to_cpu(*(uint16_t *)(ep + 0));
	cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
	cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
	    "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);

	scsi_status = sense_key = asc = ascq = 0;
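
	/*
	 * Compare the actual vs. expected guard/app/ref tags reported in
	 * the CTIO CRC status; on mismatch, prepare a CHECK CONDITION with
	 * ABORTED COMMAND and ASC 0x10, where the ASCQ distinguishes guard
	 * (0x1), application tag (0x2) and reference tag (0x3) errors.
	 */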

	/* check appl tag */
	if (cmd->e_app_tag != cmd->a_app_tag) {
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d,
		    "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
		    cmd->atio.u.isp24.fcp_hdr.ox_id);

		cmd->dif_err_code = DIF_ERR_APP;
		scsi_status = SAM_STAT_CHECK_CONDITION;
		sense_key = ABORTED_COMMAND;
		asc = 0x10;
		ascq = 0x2;
	}

	/* check ref tag */
	if (cmd->e_ref_tag != cmd->a_ref_tag) {
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e,
		    "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
		    cmd->atio.u.isp24.fcp_hdr.ox_id);

		cmd->dif_err_code = DIF_ERR_REF;
		scsi_status = SAM_STAT_CHECK_CONDITION;
		sense_key = ABORTED_COMMAND;
		asc = 0x10;
		ascq = 0x3;
		goto out;
	}

	/* check guard */
	if (cmd->e_guard != cmd->a_guard) {
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe012,
		    "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
		    cmd->atio.u.isp24.fcp_hdr.ox_id);

		cmd->dif_err_code = DIF_ERR_GRD;
		scsi_status = SAM_STAT_CHECK_CONDITION;
		sense_key = ABORTED_COMMAND;
		asc = 0x10;
		ascq = 0x1;
	}
out:
	switch (cmd->state) {
	case QLA_TGT_STATE_NEED_DATA:
		/* handle_data will load DIF error code  */
		cmd->state = QLA_TGT_STATE_DATA_IN;
		vha->hw->tgt.tgt_ops->handle_data(cmd);
		break;
	default:
		spin_lock_irqsave(&cmd->cmd_lock, flags);
		if (cmd->aborted) {
			spin_unlock_irqrestore(&cmd->cmd_lock, flags);
			vha->hw->tgt.tgt_ops->free_cmd(cmd);
			break;
		}
		spin_unlock_irqrestore(&cmd->cmd_lock, flags);
		qlt_send_resp_ctio(vha, cmd, scsi_status, sense_key, asc, ascq);
		/* assume scsi status gets out on the wire.
		 * Will not wait for completion.
		 */
		vha->hw->tgt.tgt_ops->free_cmd(cmd);
		break;
	}
}
/* If hardware_lock held on entry, might drop it, then reacquire */
/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy)
{
	struct nack_to_isp *nack;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;

	ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
	    "Sending TERM ELS CTIO (ha=%p)\n", ha);

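	/*
	 * Terminate the immediate-notify exchange: send a NOTIFY_ACK IOCB
	 * with NOTIFY_ACK_FLAGS_TERMINATE set so the firmware tears the
	 * exchange down instead of completing it.
	 */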
	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (pkt == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe080,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			__constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}

	/* terminate */
	nack->u.isp24.flags |=
		__constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);

	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	qla2x00_start_iocbs(vha, vha->req);
	return ret;
}

static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked)
{
	unsigned long flags = 0;
	int rc;

	if (qlt_issue_marker(vha, ha_locked) < 0)
		return;

	if (ha_locked) {
		rc = __qlt_send_term_imm_notif(vha, imm);

#if 0	/* Todo  */
		if (rc == -ENOMEM)
			qlt_alloc_qfull_cmd(vha, imm, 0, 0);
#else
		if (rc) {
		}
#endif
		goto done;
	}

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	rc = __qlt_send_term_imm_notif(vha, imm);

#if 0	/* Todo */
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, imm, 0, 0);
#endif

done:
	if (!ha_locked)
		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}

/*
 * If hardware_lock held on entry, might drop it, then reacquire
 * This function sends the appropriate CTIO to ISP 2xxx or 24xx
 */
static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd,
	struct atio_from_isp *atio)
{
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);

	pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
	if (pkt == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe050,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	if (cmd != NULL) {
		if (cmd->state < QLA_TGT_STATE_PROCESSED) {
			ql_dbg(ql_dbg_tgt, vha, 0xe051,
			    "qla_target(%d): Terminating cmd %p with "
			    "incorrect state %d\n", vha->vp_idx, cmd,
			    cmd->state);
		} else
			ret = 1;
	}

	vha->tgt_counters.num_term_xchg_sent++;
	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE;
	ctio24->u.status1.flags = cpu_to_le16(temp);
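	/*
	 * Flag packing note (illustrative): the ATIO's attr bits are
	 * shifted to bit 9 of the CTIO7 flags word and combined with the
	 * STATUS_MODE_1 and TERMINATE bits before the whole 16-bit word
	 * is converted to little-endian for the firmware.
	 */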
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio24->u.status1.ox_id = cpu_to_le16(temp);

	/* Most likely, it isn't needed */
	ctio24->u.status1.residual = get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]);
	if (ctio24->u.status1.residual != 0)
		ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
	return ret;
}

static void qlt_send_term_exchange(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
	int ul_abort)
{
	unsigned long flags = 0;
	int rc;

	if (qlt_issue_marker(vha, ha_locked) < 0)
		return;

	if (ha_locked) {
		rc = __qlt_send_term_exchange(vha, cmd, atio);
		if (rc == -ENOMEM)
			qlt_alloc_qfull_cmd(vha, atio, 0, 0);
		goto done;
	}
	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	rc = __qlt_send_term_exchange(vha, cmd, atio);
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, atio, 0, 0);

done:
	if (cmd && !ul_abort && !cmd->aborted) {
		if (cmd->sg_mapped)
			qlt_unmap_sg(vha, cmd);
		vha->hw->tgt.tgt_ops->free_cmd(cmd);
	}

	if (!ha_locked)
		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return;
}

static void qlt_init_term_exchange(struct scsi_qla_host *vha)
{
	struct list_head free_list;
	struct qla_tgt_cmd *cmd, *tcmd;

	vha->hw->tgt.leak_exchg_thresh_hold =
	    (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
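	/*
	 * Worked example (illustrative; cur_fw_xcb_count varies by HBA and
	 * firmware): with cur_fw_xcb_count = 4096 and a threshold of 75%,
	 * the integer math above gives (4096 / 100) * 75 = 40 * 75 = 3000
	 * leaked exchanges before a chip reset is requested.
	 */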

	cmd = tcmd = NULL;
	if (!list_empty(&vha->hw->tgt.q_full_list)) {
		INIT_LIST_HEAD(&free_list);
		list_splice_init(&vha->hw->tgt.q_full_list, &free_list);

		list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
			list_del(&cmd->cmd_list);
			/* This cmd was never sent to TCM.  There is no need
			 * to schedule free or call free_cmd
			 */
			qlt_free_cmd(cmd);
			vha->hw->tgt.num_qfull_cmds_alloc--;
		}
	}
	vha->hw->tgt.num_qfull_cmds_dropped = 0;
}

static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
{
	uint32_t total_leaked;

	total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;

	if (vha->hw->tgt.leak_exchg_thresh_hold &&
	    (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {

		ql_dbg(ql_dbg_tgt, vha, 0xe079,
		    "Chip reset due to exchange starvation: %d/%d.\n",
		    total_leaked, vha->hw->cur_fw_xcb_count);

		if (IS_P3P_TYPE(vha->hw))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
	}

}

int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
{
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
	    "qla_target(%d): terminating exchange for aborted cmd=%p "
	    "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
	    se_cmd->tag);

	spin_lock_irqsave(&cmd->cmd_lock, flags);
	if (cmd->aborted) {
		spin_unlock_irqrestore(&cmd->cmd_lock, flags);
		/*
		 * It's normal to see 2 calls in this path:
		 *  1) XFER Rdy completion + CMD_T_ABORT
		 *  2) TCM TMR - drain_state_list
		 */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016,
		    "multiple abort. %p transport_state %x, t_state %x, "
		    "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
		    cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
		return EIO;
	}
	cmd->aborted = 1;
	cmd->trc_flags |= TRC_ABORT;
	spin_unlock_irqrestore(&cmd->cmd_lock, flags);

	qlt_send_term_exchange(vha, cmd, &cmd->atio, 0, 1);
	return 0;
}
EXPORT_SYMBOL(qlt_abort_cmd);

void qlt_free_cmd(struct qla_tgt_cmd *cmd)
{
	struct fc_port *sess = cmd->sess;

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
	    "%s: se_cmd[%p] ox_id %04x\n",
	    __func__, &cmd->se_cmd,
	    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));

	BUG_ON(cmd->cmd_in_wq);

	if (cmd->sg_mapped)
		qlt_unmap_sg(cmd->vha, cmd);

	if (!cmd->q_full)
		qlt_decr_num_pend_cmds(cmd->vha);

	BUG_ON(cmd->sg_mapped);
	cmd->jiffies_at_free = get_jiffies_64();
	if (unlikely(cmd->free_sg))
		kfree(cmd->sg);

	if (!sess || !sess->se_sess) {
		WARN_ON(1);
		return;
	}
	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
}
EXPORT_SYMBOL(qlt_free_cmd);

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
	struct qla_tgt_cmd *cmd, uint32_t status)
{
	int term = 0;

	if (cmd->se_cmd.prot_op)
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe013,
		    "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
		    "se_cmd=%p tag[%x] op %#x/%s",
		     cmd->lba, cmd->lba,
		     cmd->num_blks, &cmd->se_cmd,
		     cmd->atio.u.isp24.exchange_addr,
		     cmd->se_cmd.prot_op,
		     prot_op_str(cmd->se_cmd.prot_op));

	if (ctio != NULL) {
		struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
		term = !(c->flags &
		    cpu_to_le16(OF_TERM_EXCH));
	} else
		term = 1;

	if (term)
		qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);

	return term;
}

/* ha->hardware_lock supposed to be held on entry */
static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
	uint32_t handle)
{
	struct qla_hw_data *ha = vha->hw;

	handle--;
	if (ha->tgt.cmds[handle] != NULL) {
		struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
		ha->tgt.cmds[handle] = NULL;
		return cmd;
	} else
		return NULL;
}
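
/*
 * Handle convention example (annotation, not driver code): firmware
 * handles are 1-based, so handle N indexes ha->tgt.cmds[N - 1] and the
 * entry is cleared on lookup to mark the command as no longer in flight:
 *
 *	cmd = qlt_get_cmd(vha, 1);	// returns and clears ha->tgt.cmds[0]
 */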

/* ha->hardware_lock supposed to be held on entry */
static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
	uint32_t handle, void *ctio)
{
	struct qla_tgt_cmd *cmd = NULL;

	/* Clear out internal marks */
	handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
	    CTIO_INTERMEDIATE_HANDLE_MARK);

	if (handle != QLA_TGT_NULL_HANDLE) {
		if (unlikely(handle == QLA_TGT_SKIP_HANDLE))
			return NULL;

		/* handle-1 is actually used */
		if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe052,
			    "qla_target(%d): Wrong handle %x received\n",
			    vha->vp_idx, handle);
			return NULL;
		}
		cmd = qlt_get_cmd(vha, handle);
		if (unlikely(cmd == NULL)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe053,
			    "qla_target(%d): Suspicious: unable to "
			    "find the command with handle %x\n", vha->vp_idx,
			    handle);
			return NULL;
		}
	} else if (ctio != NULL) {
		/* We can't get loop ID from CTIO7 */
		ql_dbg(ql_dbg_tgt, vha, 0xe054,
		    "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
		    "support NULL handles\n", vha->vp_idx);
		return NULL;
	}

	return cmd;
}

/* hardware_lock should be held by caller. */
static void
qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle;

	if (cmd->sg_mapped)
		qlt_unmap_sg(vha, cmd);

	handle = qlt_make_handle(vha);

	/* TODO: fix debug message type and ids. */
	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
		ql_dbg(ql_dbg_io, vha, 0xff00,
		    "HOST-ABORT: handle=%d, state=PROCESSED.\n", handle);
	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		cmd->write_data_transferred = 0;
		cmd->state = QLA_TGT_STATE_DATA_IN;

		ql_dbg(ql_dbg_io, vha, 0xff01,
		    "HOST-ABORT: handle=%d, state=DATA_IN.\n", handle);

		ha->tgt.tgt_ops->handle_data(cmd);
		return;
	} else {
		ql_dbg(ql_dbg_io, vha, 0xff03,
		    "HOST-ABORT: handle=%d, state=BAD(%d).\n", handle,
		    cmd->state);
		dump_stack();
	}

	cmd->trc_flags |= TRC_FLUSH;
	ha->tgt.tgt_ops->free_cmd(cmd);
}

void
qlt_host_reset_handler(struct qla_hw_data *ha)
{
	struct qla_tgt_cmd *cmd;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	scsi_qla_host_t *vha = NULL;
	struct qla_tgt *tgt = base_vha->vha_tgt.qla_tgt;
	uint32_t i;

	if (!base_vha->hw->tgt.tgt_ops)
		return;

	if (!tgt || qla_ini_mode_enabled(base_vha)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
			"Target mode disabled\n");
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xff10,
	    "HOST-ABORT-HNDLR: base_vha->dpc_flags=%lx.\n",
	    base_vha->dpc_flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (i = 1; i < DEFAULT_OUTSTANDING_COMMANDS + 1; i++) {
		cmd = qlt_get_cmd(base_vha, i);
		if (!cmd)
			continue;
		/* ha->tgt.cmds entry is cleared by qlt_get_cmd. */
		vha = cmd->vha;
		qlt_abort_cmd_on_host_reset(vha, cmd);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}


/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
	uint32_t status, void *ctio)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd;
	struct qla_tgt_cmd *cmd;

	if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
		/* That could happen only in case of an error/reset/abort */
		if (status != CTIO_SUCCESS) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
			    "Intermediate CTIO received"
			    " (status %x)\n", status);
		}
		return;
	}

	cmd = qlt_ctio_to_cmd(vha, handle, ctio);
	if (cmd == NULL)
		return;

	se_cmd = &cmd->se_cmd;
	cmd->cmd_sent_to_fw = 0;

	qlt_unmap_sg(vha, cmd);

	if (unlikely(status != CTIO_SUCCESS)) {
		switch (status & 0xFFFF) {
		case CTIO_LIP_RESET:
		case CTIO_TARGET_RESET:
		case CTIO_ABORTED:
			/* driver request abort via Terminate exchange */
		case CTIO_TIMEOUT:
		case CTIO_INVALID_RX_ID:
			/* They are OK */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
			    "qla_target(%d): CTIO with "
			    "status %#x received, state %x, se_cmd %p, "
			    "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
			    "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
			    status, cmd->state, se_cmd);
			break;

		case CTIO_PORT_LOGGED_OUT:
		case CTIO_PORT_UNAVAILABLE:
		{
			int logged_out =
				(status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
			    "qla_target(%d): CTIO with %s status %x "
			    "received (state %x, se_cmd %p)\n", vha->vp_idx,
			    logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
			    status, cmd->state, se_cmd);

			if (logged_out && cmd->sess) {
				/*
				 * Session is already logged out, but we need
				 * to notify initiator, who's not aware of this
				 */
				cmd->sess->logout_on_delete = 0;
				cmd->sess->send_els_logo = 1;
				ql_dbg(ql_dbg_disc, vha, 0x20f8,
				    "%s %d %8phC post del sess\n",
				    __func__, __LINE__, cmd->sess->port_name);

				qlt_schedule_sess_for_deletion_lock(cmd->sess);
			}
			break;
		}
		case CTIO_DIF_ERROR: {
			struct ctio_crc_from_fw *crc =
				(struct ctio_crc_from_fw *)ctio;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
			    "qla_target(%d): CTIO with DIF_ERROR status %x "
			    "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
			    "expect_dif[0x%llx]\n",
			    vha->vp_idx, status, cmd->state, se_cmd,
			    *((u64 *)&crc->actual_dif[0]),
			    *((u64 *)&crc->expected_dif[0]));

			qlt_handle_dif_error(vha, cmd, ctio);
			return;
		}
		default:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
			    "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			break;
		}


		/* "cmd->aborted" means
		 * cmd is already aborted/terminated, we don't
		 * need to terminate again.  The exchange is already
		 * cleaned up/freed at FW level.  Just cleanup at driver
		 * level.
		 */
		if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
		    (!cmd->aborted)) {
			cmd->trc_flags |= TRC_CTIO_ERR;
			if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
				return;
		}
	}

	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
		cmd->trc_flags |= TRC_CTIO_DONE;
	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		cmd->state = QLA_TGT_STATE_DATA_IN;

		if (status == CTIO_SUCCESS)
			cmd->write_data_transferred = 1;

		ha->tgt.tgt_ops->handle_data(cmd);
		return;
	} else if (cmd->aborted) {
		cmd->trc_flags |= TRC_CTIO_ABORTED;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
		  "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
	} else {
		cmd->trc_flags |= TRC_CTIO_STRANGE;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
		    "qla_target(%d): A command in state (%d) should "
		    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
	}

	if (unlikely(status != CTIO_SUCCESS) &&
		!cmd->aborted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
		dump_stack();
	}

	ha->tgt.tgt_ops->free_cmd(cmd);
}

static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
	uint8_t task_codes)
{
	int fcp_task_attr;

	switch (task_codes) {
	case ATIO_SIMPLE_QUEUE:
		fcp_task_attr = TCM_SIMPLE_TAG;
		break;
	case ATIO_HEAD_OF_QUEUE:
		fcp_task_attr = TCM_HEAD_TAG;
		break;
	case ATIO_ORDERED_QUEUE:
		fcp_task_attr = TCM_ORDERED_TAG;
		break;
	case ATIO_ACA_QUEUE:
		fcp_task_attr = TCM_ACA_TAG;
		break;
	case ATIO_UNTAGGED:
		fcp_task_attr = TCM_SIMPLE_TAG;
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
		    "qla_target: unknown task code %x, use ORDERED instead\n",
		    task_codes);
		fcp_task_attr = TCM_ORDERED_TAG;
		break;
	}

	return fcp_task_attr;
}

static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *,
					uint8_t *);
/*
 * Process context for I/O path into tcm_qla2xxx code
 */
static void __qlt_do_work(struct qla_tgt_cmd *cmd)
{
	scsi_qla_host_t *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = cmd->sess;
	struct atio_from_isp *atio = &cmd->atio;
	unsigned char *cdb;
	unsigned long flags;
	uint32_t data_length;
	int ret, fcp_task_attr, data_dir, bidi = 0;

	cmd->cmd_in_wq = 0;
	cmd->trc_flags |= TRC_DO_WORK;
	if (tgt->tgt_stop)
		goto out_term;

	if (cmd->aborted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
		    "cmd with tag %u is aborted\n",
		    cmd->atio.u.isp24.exchange_addr);
		goto out_term;
	}

	spin_lock_init(&cmd->cmd_lock);
	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
	cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
	cmd->unpacked_lun = scsilun_to_int(
	    (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);

	if (atio->u.isp24.fcp_cmnd.rddata &&
	    atio->u.isp24.fcp_cmnd.wrdata) {
		bidi = 1;
		data_dir = DMA_TO_DEVICE;
	} else if (atio->u.isp24.fcp_cmnd.rddata)
		data_dir = DMA_FROM_DEVICE;
	else if (atio->u.isp24.fcp_cmnd.wrdata)
		data_dir = DMA_TO_DEVICE;
	else
		data_dir = DMA_NONE;

	fcp_task_attr = qlt_get_fcp_task_attr(vha,
	    atio->u.isp24.fcp_cmnd.task_attr);
	data_length = be32_to_cpu(get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]));
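	/*
	 * Layout note (illustrative): in the FCP_CMND payload the 4-byte
	 * data length follows the additional CDB bytes, hence the indexing
	 * into add_cdb[] above; with add_cdb_len == 0 the length sits
	 * right after the base CDB.
	 */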

	ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
				          fcp_task_attr, data_dir, bidi);
	if (ret != 0)
		goto out_term;
	/*
	 * Drop the extra session reference taken in qlt_handle_cmd_for_atio().
	 */
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	return;

out_term:
	ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
	/*
	 * cmd has not sent to target yet, so pass NULL as the second
	 * argument to qlt_send_term_exchange() and free the memory here.
	 */
	cmd->trc_flags |= TRC_DO_WORK_ERR;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_send_term_exchange(vha, NULL, &cmd->atio, 1, 0);

	qlt_decr_num_pend_cmds(vha);
	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}

static void qlt_do_work(struct work_struct *work)
{
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
	scsi_qla_host_t *vha = cmd->vha;
	unsigned long flags;

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_del(&cmd->cmd_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	__qlt_do_work(cmd);
}

static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
				       struct fc_port *sess,
				       struct atio_from_isp *atio)
{
	struct se_session *se_sess = sess->se_sess;
	struct qla_tgt_cmd *cmd;
	int tag;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
	if (tag < 0)
		return NULL;

	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
	memset(cmd, 0, sizeof(struct qla_tgt_cmd));

	memcpy(&cmd->atio, atio, sizeof(*atio));
	cmd->state = QLA_TGT_STATE_NEW;
	cmd->tgt = vha->vha_tgt.qla_tgt;
	qlt_incr_num_pend_cmds(vha);
	cmd->vha = vha;
	cmd->se_cmd.map_tag = tag;
	cmd->sess = sess;
	cmd->loop_id = sess->loop_id;
	cmd->conf_compl_supported = sess->conf_compl_supported;

	cmd->trc_flags = 0;
	cmd->jiffies_at_alloc = get_jiffies_64();

	cmd->reset_count = vha->hw->chip_reset;

	return cmd;
}
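
/*
 * Usage sketch (illustrative, not driver code): a tag taken from the
 * session tag pool by qlt_get_tag() must eventually be returned with
 * percpu_ida_free(), as qlt_free_cmd() does on the normal teardown path:
 *
 *	cmd = qlt_get_tag(vha, sess, atio);
 *	if (!cmd)
 *		return -ENOMEM;		// pool exhausted; caller sends busy
 *	...
 *	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
 */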

static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *,
			  uint16_t);

static void qlt_create_sess_from_atio(struct work_struct *work)
{
	struct qla_tgt_sess_op *op = container_of(work,
					struct qla_tgt_sess_op, work);
	scsi_qla_host_t *vha = op->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	struct qla_tgt_cmd *cmd;
	unsigned long flags;
	uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_del(&op->cmd_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	if (op->aborted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
		    "sess_op with tag %u is aborted\n",
		    op->atio.u.isp24.exchange_addr);
		goto out_term;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
	    "qla_target(%d): Unable to find wwn login"
	    " (s_id %x:%x:%x), trying to create it manually\n",
	    vha->vp_idx, s_id[0], s_id[1], s_id[2]);

	if (op->atio.u.raw.entry_count > 1) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
		    "Dropping multy entry atio %p\n", &op->atio);
		goto out_term;
	}

	sess = qlt_make_local_sess(vha, s_id);
	/* sess has an extra creation ref. */

	if (!sess)
		goto out_term;
	/*
	 * Now obtain a pre-allocated session tag using the original op->atio
	 * packet header, and dispatch into __qlt_do_work() using the existing
	 * process context.
	 */
	cmd = qlt_get_tag(vha, sess, &op->atio);
	if (!cmd) {
		spin_lock_irqsave(&ha->hardware_lock, flags);
		qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);
		ha->tgt.tgt_ops->put_sess(sess);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		kfree(op);
		return;
	}

	/*
	 * __qlt_do_work() will call qlt_put_sess() to release
	 * the extra reference taken above by qlt_make_local_sess()
	 */
	__qlt_do_work(cmd);
	kfree(op);
	return;
out_term:
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_send_term_exchange(vha, NULL, &op->atio, 1, 0);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	kfree(op);
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess;
	struct qla_tgt_cmd *cmd;
	unsigned long flags;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_io, vha, 0x3061,
		    "New command while device %p is shutting down\n", tgt);
		return -EFAULT;
	}

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
	if (unlikely(!sess)) {
		struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op),
						     GFP_ATOMIC);
		if (!op)
			return -ENOMEM;

		memcpy(&op->atio, atio, sizeof(*atio));
		op->vha = vha;

		spin_lock_irqsave(&vha->cmd_list_lock, flags);
		list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

		INIT_WORK(&op->work, qlt_create_sess_from_atio);
		queue_work(qla_tgt_wq, &op->work);
		return 0;
	}

	/* Another WWN used to have our s_id. Our PLOGI scheduled its
	 * session deletion, but it's still in sess_del_work wq */
	if (sess->deleted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
		    "New command while old session %p is being deleted\n",
		    sess);
		return -EFAULT;
	}

	/*
	 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
	 */
	if (!kref_get_unless_zero(&sess->sess_kref)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
		    "%s: kref_get fail, %8phC oxid %x \n",
		    __func__, sess->port_name,
		     be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
		return -EFAULT;
	}

	cmd = qlt_get_tag(vha, sess, atio);
	if (!cmd) {
		ql_dbg(ql_dbg_io, vha, 0x3062,
		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		ha->tgt.tgt_ops->put_sess(sess);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return -ENOMEM;
	}

	cmd->cmd_in_wq = 1;
	cmd->trc_flags |= TRC_NEW_CMD;
	cmd->se_cmd.cpuid = ha->msix_count ?
		ha->tgt.rspq_vector_cpuid : WORK_CPU_UNBOUND;

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	INIT_WORK(&cmd->work, qlt_do_work);
	if (ha->msix_count) {
		if (cmd->atio.u.isp24.fcp_cmnd.rddata)
			queue_work_on(smp_processor_id(), qla_tgt_wq,
			    &cmd->work);
		else
			queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
			    &cmd->work);
	} else {
		queue_work(qla_tgt_wq, &cmd->work);
	}
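	/*
	 * Queueing note (illustrative): with MSI-X, reads are queued on
	 * the CPU that received the ATIO while other commands follow the
	 * response queue's vector CPU, keeping the command state warm in
	 * the cache that will complete it.
	 */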
	return 0;

}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags)
{
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	int res;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (!mcmd) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
		    "qla_target(%d): Allocation of management "
		    "command failed, some commands and their data could "
		    "leak\n", vha->vp_idx);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));
	mcmd->sess = sess;

	if (iocb) {
		memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
		    sizeof(mcmd->orig_iocb.imm_ntfy));
	}
	mcmd->tmr_func = fn;
	mcmd->flags = flags;
	mcmd->reset_count = vha->hw->chip_reset;

	switch (fn) {
	case QLA_TGT_LUN_RESET:
	    abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
	    break;
	}

	res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, mcmd->tmr_func, 0);
	if (res != 0) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
		    "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
		    sess->vha->vp_idx, res);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt;
	struct fc_port *sess;
	u64 unpacked_lun;
	int fn;
	unsigned long flags;

	tgt = vha->vha_tgt.qla_tgt;

	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    a->u.isp24.fcp_hdr.s_id);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);

	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
		    "qla_target(%d): task mgmt fn 0x%x for "
		    "non-existant session\n", vha->vp_idx, fn);
		return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
		    sizeof(struct atio_from_isp));
	}

	if (sess->deleted)
		return -EFAULT;

	return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
}

/* ha->hardware_lock supposed to be held on entry */
static int __qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb, struct fc_port *sess)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	u64 unpacked_lun;
	int rc;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
	    sizeof(mcmd->orig_iocb.imm_ntfy));

	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
	mcmd->reset_count = vha->hw->chip_reset;
	mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK;

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func,
	    le16_to_cpu(iocb->u.isp2x.seq_id));
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
		    vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	int loop_id;
	unsigned long flags;

	loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	if (sess == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
		    "qla_target(%d): task abort for unexisting "
		    "session\n", vha->vp_idx);
		return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
	}

	return __qlt_abort_task(vha, iocb, sess);
}

void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
{
	if (rc != MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
			"%s: se_sess %p / sess %p from"
			" port %8phC loop_id %#04x s_id %02x:%02x:%02x"
			" LOGO failed: %#x\n",
			__func__,
			fcport->se_sess,
			fcport,
			fcport->port_name, fcport->loop_id,
			fcport->d_id.b.domain, fcport->d_id.b.area,
			fcport->d_id.b.al_pa, rc);
	}

	fcport->logout_completed = 1;
}

/*
* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
*
* Schedules sessions with matching port_id/loop_id but different wwn for
* deletion. Returns existing session with matching wwn if present.
* Null otherwise.
*/
struct fc_port *
qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
    port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess)
{
	struct fc_port *sess = NULL, *other_sess;
	uint64_t other_wwn;

	*conflict_sess = NULL;

	list_for_each_entry(other_sess, &vha->vp_fcports, list) {

		other_wwn = wwn_to_u64(other_sess->port_name);

		if (wwn == other_wwn) {
			WARN_ON(sess);
			sess = other_sess;
			continue;
		}

		/* find other sess with nport_id collision */
		if (port_id.b24 == other_sess->d_id.b24) {
			if (loop_id != other_sess->loop_id) {
				ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000c,
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);

				/*
				 * logout_on_delete is set by default, but another
				 * session that has the same s_id/loop_id combo
				 * might have cleared it when it requested this
				 * session's deletion, so don't touch it
				 */
				qlt_schedule_sess_for_deletion(other_sess, true);
			} else {
				/*
				 * Another wwn used to have our s_id/loop_id
				 * kill the session, but don't free the loop_id
				 */
				ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01b,
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);


				other_sess->keep_nport_handle = 1;
				*conflict_sess = other_sess;
				qlt_schedule_sess_for_deletion(other_sess,
				    true);
			}
			continue;
		}

		/* find other sess with nport handle collision */
		if ((loop_id == other_sess->loop_id) &&
			(loop_id != FC_NO_LOOP_ID)) {
			ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000d,
			       "Invalidating sess %p loop_id %d wwn %llx.\n",
			       other_sess, other_sess->loop_id, other_wwn);

			/* Same loop_id but different s_id
			 * Ok to kill and logout */
			qlt_schedule_sess_for_deletion(other_sess, true);
		}
	}

	return sess;
}

/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;
	uint32_t key;
	int count = 0;
	unsigned long flags;

	key = (((u32)s_id->b.domain << 16) |
	       ((u32)s_id->b.area   <<  8) |
	       ((u32)s_id->b.al_pa));
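	/*
	 * Worked example: for S_ID 01:02:03 (domain 0x01, area 0x02,
	 * al_pa 0x03) the packing above yields key = 0x010203, the same
	 * encoding produced by sid_to_key() for the comparisons below.
	 */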

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
		uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);

		if (op_key == key) {
			op->aborted = true;
			count++;
		}
	}

	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
		uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
		if (op_key == key) {
			op->aborted = true;
			count++;
		}
	}

	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
		if (cmd_key == key) {
			cmd->aborted = 1;
			count++;
		}
	}
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	return count;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL, *conflict_sess = NULL;
	uint64_t wwn;
	port_id_t port_id;
	uint16_t loop_id;
	uint16_t wd3_lo;
	int res = 0;
	struct qlt_plogi_ack_t *pla;
	unsigned long flags;

	wwn = wwn_to_u64(iocb->u.isp24.port_name);

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area   = iocb->u.isp24.port_id[1];
	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	ql_dbg(ql_dbg_disc, vha, 0xf026,
	    "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n",
	    vha->vp_idx, iocb->u.isp24.port_id[2],
		iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
		   iocb->u.isp24.status_subcode, loop_id,
		iocb->u.isp24.port_name);

	/* res = 1 means ack at the end of thread
	 * res = 0 means ack async/later.
	 */
	switch (iocb->u.isp24.status_subcode) {
	case ELS_PLOGI:

		/* Mark all stale commands in qla_tgt_wq for deletion */
		abort_cmds_for_s_id(vha, &port_id);

		if (wwn) {
			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
			sess = qlt_find_sess_invalidate_other(vha, wwn,
				port_id, loop_id, &conflict_sess);
			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
		}

		if (IS_SW_RESV_ADDR(port_id)) {
			res = 1;
			break;
		}

		pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
		if (!pla) {
			qlt_send_term_imm_notif(vha, iocb, 1);
			break;
		}

		res = 0;

		if (conflict_sess) {
			conflict_sess->login_gen++;
			qlt_plogi_ack_link(vha, pla, conflict_sess,
				QLT_PLOGI_LINK_CONFLICT);
		}

		if (!sess) {
			pla->ref_count++;
			qla24xx_post_newsess_work(vha, &port_id,
				iocb->u.isp24.port_name, pla);
			res = 0;
			break;
		}

		qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
		sess->fw_login_state = DSC_LS_PLOGI_PEND;
		sess->d_id = port_id;
		sess->login_gen++;

		switch (sess->disc_state) {
		case DSC_DELETED:
			qlt_plogi_ack_unref(vha, pla);
			break;

		default:
			/*
			 * Under normal circumstances we want to release nport handle
			 * during LOGO process to avoid nport handle leaks inside FW.
			 * The exception is when LOGO is done while another PLOGI with
			 * the same nport handle is waiting as might be the case here.
			 * Note: there is always a possibility of a race where session
			 * deletion has already started for other reasons (e.g. ACL
			 * removal) and now PLOGI arrives:
			 * 1. if PLOGI arrived in FW after nport handle has been freed,
			 *    FW must have assigned this PLOGI a new/same handle and we
			 *    can proceed ACK'ing it as usual when session deletion
			 *    completes.
			 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
			 *    bit reached it, the handle has now been released. We'll
			 *    get an error when we ACK this PLOGI. Nothing will be sent
			 *    back to initiator. Initiator should eventually retry
			 *    PLOGI and situation will correct itself.
			 */
			sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
			   (sess->d_id.b24 == port_id.b24));

			ql_dbg(ql_dbg_disc, vha, 0x20f9,
			    "%s %d %8phC post del sess\n",
			    __func__, __LINE__, sess->port_name);


			qlt_schedule_sess_for_deletion_lock(sess);
			break;
		}

		break;

	case ELS_PRLI:
		wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);

		if (wwn) {
			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
			sess = qlt_find_sess_invalidate_other(vha, wwn, port_id,
				loop_id, &conflict_sess);
			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
		}

		if (conflict_sess) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
			    "PRLI with conflicting sess %p port %8phC\n",
			    conflict_sess, conflict_sess->port_name);
			qlt_send_term_imm_notif(vha, iocb, 1);
			res = 0;
			break;
		}

		if (sess != NULL) {
			if (sess->fw_login_state != DSC_LS_PLOGI_PEND &&
			    sess->fw_login_state != DSC_LS_PLOGI_COMP) {
				/*
				 * Impatient initiator sent PRLI before the last
				 * PLOGI could finish. Force it to retry while
				 * the last one finishes.
				 */
				ql_log(ql_log_warn, sess->vha, 0xf095,
				    "sess %p PRLI received, before plogi ack.\n",
				    sess);
				qlt_send_term_imm_notif(vha, iocb, 1);
				res = 0;
				break;
			}

			/*
			 * This shouldn't happen under normal circumstances,
			 * since we have deleted the old session during PLOGI
			 */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
			    "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
			    sess->loop_id, sess, iocb->u.isp24.nport_handle);

			sess->local = 0;
			sess->loop_id = loop_id;
			sess->d_id = port_id;
			sess->fw_login_state = DSC_LS_PRLI_PEND;

			if (wd3_lo & BIT_7)
				sess->conf_compl_supported = 1;

			if ((wd3_lo & BIT_4) == 0)
				sess->port_type = FCT_INITIATOR;
			else
				sess->port_type = FCT_TARGET;
		}
		res = 1; /* send notify ack */

		/* Make session global (not used in fabric mode) */
		if (ha->current_topology != ISP_CFG_F) {
			if (sess) {
				ql_dbg(ql_dbg_disc, vha, 0x20fa,
				    "%s %d %8phC post nack\n",
				    __func__, __LINE__, sess->port_name);
				qla24xx_post_nack_work(vha, sess, iocb,
					SRB_NACK_PRLI);
				res = 0;
			} else {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else {
			if (sess) {
				ql_dbg(ql_dbg_disc, vha, 0x20fb,
				    "%s %d %8phC post nack\n",
				    __func__, __LINE__, sess->port_name);
				qla24xx_post_nack_work(vha, sess, iocb,
					SRB_NACK_PRLI);
				res = 0;
			}
		}
		break;

	case ELS_TPRLO:
		if (le16_to_cpu(iocb->u.isp24.flags) &
			NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
			loop_id = 0xFFFF;
			qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS);
			res = 1;
			break;
		}
		/* fall through */
	case ELS_LOGO:
	case ELS_PRLO:
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = qla2x00_find_fcport_by_loopid(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		if (sess) {
			sess->login_gen++;
			sess->fw_login_state = DSC_LS_LOGO_PEND;
			sess->logo_ack_needed = 1;
			memcpy(sess->iocb, iocb, IOCB_SIZE);
		}

		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);

		ql_dbg(ql_dbg_disc, vha, 0x20fc,
		    "%s: logo %llx res %d sess %p ",
		    __func__, wwn, res, sess);
		if (res == 0) {
			/*
			 * cmd went upper layer, look for qlt_xmit_tm_rsp()
			 * for LOGO_ACK & sess delete
			 */
			BUG_ON(!sess);
			res = 0;
		} else {
			/* cmd did not go to upper layer. */
			if (sess) {
				qlt_schedule_sess_for_deletion_lock(sess);
				res = 0;
			}
			/* else logo will be ack */
		}
		break;
	case ELS_PDISC:
	case ELS_ADISC:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}

		sess = qla2x00_find_fcport_by_wwpn(vha,
		    iocb->u.isp24.port_name, 1);
		if (sess) {
			ql_dbg(ql_dbg_disc, vha, 0x20fd,
				"sess %p lid %d|%d DS %d LS %d\n",
				sess, sess->loop_id, loop_id,
				sess->disc_state, sess->fw_login_state);
		}

		res = 1; /* send notify ack */
		break;
	}

	case ELS_FLOGI:	/* should never happen */
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
		    "qla_target(%d): Unsupported ELS command %x "
		    "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
		break;
	}

	return res;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t add_flags = 0;
	int send_notify_ack = 1;
	uint16_t status;

	status = le16_to_cpu(iocb->u.isp2x.status);
	switch (status) {
	case IMM_NTFY_LIP_RESET:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
		    "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
		    vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_LIP_LINK_REINIT:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
		    "qla_target(%d): LINK REINIT (loop %#x, "
		    "subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
		}
		memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
		tgt->link_reinit_iocb_pending = 1;
		/*
		 * QLogic requires waiting after LINK REINIT for possible
		 * PDISC or ADISC ELS commands
		 */
		send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_PORT_LOGOUT:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
		    "qla_target(%d): Port logout (loop "
		    "%#x, subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_TPRLO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
		    "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_PORT_CONFIG:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
		    "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
		    status);
		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_LOGO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
		    "qla_target(%d): Link failure detected\n",
		    vha->vp_idx);
		/* I_T nexus loss */
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_IOCB_OVERFLOW:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
		    "qla_target(%d): Cannot provide requested "
		    "capability (IOCB overflowed the immediate notify "
		    "resource count)\n", vha->vp_idx);
		break;

	case IMM_NTFY_ABORT_TASK:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
		    "qla_target(%d): Abort Task (S %08x I %#x -> "
		    "L %#x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp2x.seq_id),
		    GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
		    le16_to_cpu(iocb->u.isp2x.lun));
		if (qlt_abort_task(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_RESOURCE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
		    "qla_target(%d): Out of resources, host %ld\n",
		    vha->vp_idx, vha->host_no);
		break;

	case IMM_NTFY_MSG_RX:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
		    "qla_target(%d): Immediate notify task %x\n",
		    vha->vp_idx, iocb->u.isp2x.task_flags);
		if (qlt_handle_task_mgmt(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_ELS:
		if (qlt_24xx_handle_els(vha, iocb) == 0)
			send_notify_ack = 0;
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
		    "qla_target(%d): Received unknown immediate "
		    "notify status %x\n", vha->vp_idx, status);
		break;
	}

	if (send_notify_ack)
		qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 * This function sends busy to ISP 2xxx or 24xx.
 */
static int __qlt_send_busy(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status)
{
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct fc_port *sess = NULL;
	unsigned long flags;
	u16 temp;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    atio->u.isp24.fcp_hdr.s_id);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	if (!sess) {
		qlt_send_term_exchange(vha, NULL, atio, 1, 0);
		return 0;
	}
	/* Sending marker isn't necessary, since we are called from ISR */

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_io, vha, 0x3063,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	vha->tgt_counters.num_q_full_sent++;
	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = sess->loop_id;
	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) |
		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
		CTIO7_FLAGS_DONT_RET_CTIO;
	ctio24->u.status1.flags = cpu_to_le16(temp);
	/*
	 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
	 * if explicit confirmation is used.
	 */
	ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	ctio24->u.status1.scsi_status = cpu_to_le16(status);
	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
	return 0;
}

/*
 * This routine is used to allocate a command for either a QFull condition
 * (ie reply SAM_STAT_BUSY) or to terminate an exchange that did not go
 * out previously.
 */
static void
qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	struct se_session *se_sess;
	struct qla_tgt_cmd *cmd;
	int tag;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_io, vha, 0x300a,
			"New command while device %p is shutting down\n", tgt);
		return;
	}

	if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
		vha->hw->tgt.num_qfull_cmds_dropped++;
		if (vha->hw->tgt.num_qfull_cmds_dropped >
			vha->qla_stats.stat_max_qfull_cmds_dropped)
			vha->qla_stats.stat_max_qfull_cmds_dropped =
				vha->hw->tgt.num_qfull_cmds_dropped;

		ql_dbg(ql_dbg_io, vha, 0x3068,
			"qla_target(%d): %s: QFull CMD dropped[%d]\n",
			vha->vp_idx, __func__,
			vha->hw->tgt.num_qfull_cmds_dropped);

		qlt_chk_exch_leak_thresh_hold(vha);
		return;
	}

	sess = ha->tgt.tgt_ops->find_sess_by_s_id
		(vha, atio->u.isp24.fcp_hdr.s_id);
	if (!sess)
		return;

	se_sess = sess->se_sess;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
	if (tag < 0)
		return;

	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
	if (!cmd) {
		ql_dbg(ql_dbg_io, vha, 0x3009,
			"qla_target(%d): %s: Allocation of cmd failed\n",
			vha->vp_idx, __func__);

		vha->hw->tgt.num_qfull_cmds_dropped++;
		if (vha->hw->tgt.num_qfull_cmds_dropped >
			vha->qla_stats.stat_max_qfull_cmds_dropped)
			vha->qla_stats.stat_max_qfull_cmds_dropped =
				vha->hw->tgt.num_qfull_cmds_dropped;

		qlt_chk_exch_leak_thresh_hold(vha);
		return;
	}

	memset(cmd, 0, sizeof(struct qla_tgt_cmd));

	qlt_incr_num_pend_cmds(vha);
	INIT_LIST_HEAD(&cmd->cmd_list);
	memcpy(&cmd->atio, atio, sizeof(*atio));

	cmd->tgt = vha->vha_tgt.qla_tgt;
	cmd->vha = vha;
	cmd->reset_count = vha->hw->chip_reset;
	cmd->q_full = 1;

	if (qfull) {
		cmd->q_full = 1;
		/* NOTE: borrowing the state field to carry the status */
		cmd->state = status;
	} else
		cmd->term_exchg = 1;

	list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);

	vha->hw->tgt.num_qfull_cmds_alloc++;
	if (vha->hw->tgt.num_qfull_cmds_alloc >
		vha->qla_stats.stat_max_qfull_cmds_alloc)
		vha->qla_stats.stat_max_qfull_cmds_alloc =
			vha->hw->tgt.num_qfull_cmds_alloc;
}

int
qlt_free_qfull_cmds(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct qla_tgt_cmd *cmd, *tcmd;
	struct list_head free_list;
	int rc = 0;

	if (list_empty(&ha->tgt.q_full_list))
		return 0;

	INIT_LIST_HEAD(&free_list);

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);

	if (list_empty(&ha->tgt.q_full_list)) {
		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
		return 0;
	}

	list_for_each_entry_safe(cmd, tcmd, &ha->tgt.q_full_list, cmd_list) {
		if (cmd->q_full)
			/* cmd->state is a borrowed field to hold status */
			rc = __qlt_send_busy(vha, &cmd->atio, cmd->state);
		else if (cmd->term_exchg)
			rc = __qlt_send_term_exchange(vha, NULL, &cmd->atio);

		if (rc == -ENOMEM)
			break;

		if (cmd->q_full)
			ql_dbg(ql_dbg_io, vha, 0x3006,
			    "%s: busy sent for ox_id[%04x]\n", __func__,
			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
		else if (cmd->term_exchg)
			ql_dbg(ql_dbg_io, vha, 0x3007,
			    "%s: Term exchg sent for ox_id[%04x]\n", __func__,
			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
		else
			ql_dbg(ql_dbg_io, vha, 0x3008,
			    "%s: Unexpected cmd in QFull list %p\n", __func__,
			    cmd);

		list_del(&cmd->cmd_list);
		list_add_tail(&cmd->cmd_list, &free_list);

		/* piggy back on hardware_lock for protection */
		vha->hw->tgt.num_qfull_cmds_alloc--;
	}
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	cmd = NULL;

	list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
		list_del(&cmd->cmd_list);
		/* This cmd was never sent to TCM.  There is no need
		 * to schedule free or call free_cmd
		 */
		qlt_free_cmd(cmd);
	}
	return rc;
}

static void
qlt_send_busy(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status)
{
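	/*
	 * If posting the BUSY IOCB fails for lack of request-ring space,
	 * park the ATIO on the q_full_list so qlt_free_qfull_cmds() can
	 * replay it later.
	 */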
	int rc = 0;

	rc = __qlt_send_busy(vha, atio, status);
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, atio, status, 1);
}
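
/*
 * Queue-full back-pressure: once num_pend_cmds crosses
 * Q_FULL_THRESH_HOLD(ha), new ATIOs are answered with a BUSY-type status
 * (temp_sam_status) instead of being queued to the target core.
 */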

static int
qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, bool ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t status;
	unsigned long flags;

	if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
		return 0;

	if (!ha_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);
	status = temp_sam_status;
	qlt_send_busy(vha, atio, status);
	if (!ha_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return 1;
}

/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int rc;
	unsigned long flags;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0x3064,
		    "ATIO pkt, but no tgt (ha %p)", ha);
		return;
	}
	/*
	 * In tgt_stop mode we should also allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	tgt->atio_irq_cmd_count++;

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
		if (unlikely(atio->u.isp24.exchange_addr ==
		    ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
			ql_dbg(ql_dbg_io, vha, 0x3065,
			    "qla_target(%d): ATIO_TYPE7 "
			    "received with UNKNOWN exchange address, "
			    "sending QUEUE_FULL\n", vha->vp_idx);
			if (!ha_locked)
				spin_lock_irqsave(&ha->hardware_lock, flags);
			qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
			if (!ha_locked)
				spin_unlock_irqrestore(&ha->hardware_lock, flags);
			break;
		}

		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
			rc = qlt_chk_qfull_thresh_hold(vha, atio, ha_locked);
			if (rc != 0) {
				tgt->atio_irq_cmd_count--;
				return;
			}
			rc = qlt_handle_cmd_for_atio(vha, atio);
		} else {
			rc = qlt_handle_task_mgmt(vha, atio);
		}
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
				if (!ha_locked)
					spin_lock_irqsave
						(&ha->hardware_lock, flags);

#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(vha, atio, SAM_STAT_BUSY);
#else
				qlt_send_term_exchange(vha, NULL, atio, 1, 0);
#endif

				if (!ha_locked)
					spin_unlock_irqrestore
						(&ha->hardware_lock, flags);
			} else {
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe059,
					    "qla_target: Unable to send "
					    "command to target for req, "
					    "ignoring.\n");
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe05a,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status.\n", vha->vp_idx);
					if (!ha_locked)
						spin_lock_irqsave(
						    &ha->hardware_lock, flags);
					qlt_send_busy(vha, atio, SAM_STAT_BUSY);
					if (!ha_locked)
						spin_unlock_irqrestore(
						    &ha->hardware_lock, flags);
				}
			}
		}
		break;

	case IMMED_NOTIFY_TYPE:
	{
		if (unlikely(atio->u.isp2x.entry_status != 0)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05b,
			    "qla_target(%d): Received ATIO packet %x "
			    "with error status %x\n", vha->vp_idx,
			    atio->u.raw.entry_type,
			    atio->u.isp2x.entry_status);
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");

		if (!ha_locked)
			spin_lock_irqsave(&ha->hardware_lock, flags);
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe05c,
		    "qla_target(%d): Received unknown ATIO packet "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	tgt->atio_irq_cmd_count--;
}

/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
		    "qla_target(%d): Response pkt %x received, but no "
		    "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
		return;
	}

	/*
	 * In tgt_stop mode we should also allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	tgt->irq_cmd_count++;

	switch (pkt->entry_type) {
	case CTIO_CRC2:
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case ACCEPT_TGT_IO_TYPE:
	{
		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
		int rc;
		if (atio->u.isp2x.status !=
		    cpu_to_le16(ATIO_CDB_VALID)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
			    "qla_target(%d): ATIO with error "
			    "status %x received\n", vha->vp_idx,
			    le16_to_cpu(atio->u.isp2x.status));
			break;
		}

		rc = qlt_chk_qfull_thresh_hold(vha, atio, true);
		if (rc != 0) {
			tgt->irq_cmd_count--;
			return;
		}

		rc = qlt_handle_cmd_for_atio(vha, atio);
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(vha, atio, 0);
#else
				qlt_send_term_exchange(vha, NULL, atio, 1, 0);
#endif
			} else {
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe05f,
					    "qla_target: Unable to send "
					    "command to target, sending TERM "
					    "EXCHANGE for rsp\n");
					qlt_send_term_exchange(vha, NULL,
					    atio, 1, 0);
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe060,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status\n", vha->vp_idx);
					qlt_send_busy(vha, atio, 0);
				}
			}
		}
	}
	break;

	case CONTINUE_TGT_IO_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case CTIO_A64_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case IMMED_NOTIFY_TYPE:
		ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
		break;

	case NOTIFY_ACK_TYPE:
		if (tgt->notify_ack_expected > 0) {
			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe036,
			    "NOTIFY_ACK seq %08x status %x\n",
			    le16_to_cpu(entry->u.isp2x.seq_id),
			    le16_to_cpu(entry->u.isp2x.status));
			tgt->notify_ack_expected--;
			if (entry->u.isp2x.status !=
			    cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe061,
				    "qla_target(%d): NOTIFY_ACK "
				    "failed %x\n", vha->vp_idx,
				    le16_to_cpu(entry->u.isp2x.status));
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe062,
			    "qla_target(%d): Unexpected NOTIFY_ACK received\n",
			    vha->vp_idx);
		}
		break;

	case ABTS_RECV_24XX:
		ql_dbg(ql_dbg_tgt, vha, 0xe037,
		    "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
		break;

	case ABTS_RESP_24XX:
		if (tgt->abts_resp_expected > 0) {
			struct abts_resp_from_24xx_fw *entry =
				(struct abts_resp_from_24xx_fw *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe038,
			    "ABTS_RESP_24XX: compl_status %x\n",
			    entry->compl_status);
			tgt->abts_resp_expected--;
			if (le16_to_cpu(entry->compl_status) !=
			    ABTS_RESP_COMPL_SUCCESS) {
				if ((entry->error_subcode1 == 0x1E) &&
				    (entry->error_subcode2 == 0)) {
					/*
					 * We've got a race here: the aborted
					 * exchange was not terminated, i.e.
					 * the response for the aborted command
					 * was sent while the abort request was
					 * being received and processed.
					 * Unfortunately, the firmware has a
					 * silly requirement that all aborted
					 * exchanges must be explicitly
					 * terminated, otherwise it refuses to
					 * send responses for the abort
					 * requests.  So, we have to
					 * (re)terminate the exchange and retry
					 * the abort response.
					 */
					qlt_24xx_retry_term_exchange(vha,
					    entry);
				} else
					ql_dbg(ql_dbg_tgt, vha, 0xe063,
					    "qla_target(%d): ABTS_RESP_24XX "
					    "failed %x (subcode %x:%x)",
					    vha->vp_idx, entry->compl_status,
					    entry->error_subcode1,
					    entry->error_subcode2);
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe064,
			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
			    "received\n", vha->vp_idx);
		}
		break;

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe065,
		    "qla_target(%d): Received unknown response pkt "
		    "type %x\n", vha->vp_idx, pkt->entry_type);
		break;
	}

	tgt->irq_cmd_count--;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire.
 */
void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
	uint16_t *mailbox)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int login_code;

	if (!ha->tgt.tgt_ops)
		return;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe03a,
		    "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
		return;
	}

	if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
	    IS_QLA2100(ha))
		return;
	/*
	 * In tgt_stop mode we should also allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	tgt->irq_cmd_count++;

	switch (code) {
	case MBA_RESET:			/* Reset */
	case MBA_SYSTEM_ERR:		/* System Error */
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
		    "qla_target(%d): System error async event %#x "
		    "occurred", vha->vp_idx, code);
		break;
	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up. */
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LOOP_UP:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
		    "qla_target(%d): Async LOOP_UP occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		break;
	}

	case MBA_LIP_OCCURRED:
	case MBA_LOOP_DOWN:
	case MBA_LIP_RESET:
	case MBA_RSCN_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
		    "qla_target(%d): Async event %#x occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		break;

	case MBA_REJECTED_FCP_CMD:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017,
		    "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)",
		    vha->vp_idx,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));

		if (le16_to_cpu(mailbox[3]) == 1) {
			/* exchange starvation. */
			vha->hw->exch_starvation++;
			if (vha->hw->exch_starvation > 5) {
				ql_log(ql_log_warn, vha, 0xd03a,
				    "Exchange starvation. Resetting RISC\n");

				vha->hw->exch_starvation = 0;
				if (IS_P3P_TYPE(vha->hw))
					set_bit(FCOE_CTX_RESET_NEEDED,
					    &vha->dpc_flags);
				else
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		}
		break;

	case MBA_PORT_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
		    "qla_target(%d): Port update async event %#x "
		    "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
		    "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));

		login_code = le16_to_cpu(mailbox[2]);
		if (login_code == 0x4) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
			    "Async MB 2: Got PLOGI Complete\n");
			vha->hw->exch_starvation = 0;
		} else if (login_code == 0x7)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
			    "Async MB 2: Port Logged Out\n");
		break;
	default:
		break;
	}

	tgt->irq_cmd_count--;
}

static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
	uint16_t loop_id)
{
	fc_port_t *fcport, *tfcp, *del;
	int rc;
	unsigned long flags;
	u8 newfcport = 0;

	fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
	if (!fcport) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
		    "qla_target(%d): Allocation of tmp FC port failed",
		    vha->vp_idx);
		return NULL;
	}

	fcport->loop_id = loop_id;

	rc = qla24xx_gpdb_wait(vha, fcport, 0);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
		    "qla_target(%d): Failed to retrieve fcport "
		    "information -- get_port_database() returned %x "
		    "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
		kfree(fcport);
		return NULL;
	}

	del = NULL;
	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1);

	if (tfcp) {
		tfcp->d_id = fcport->d_id;
		tfcp->port_type = fcport->port_type;
		tfcp->supported_classes = fcport->supported_classes;
		tfcp->flags |= fcport->flags;

		del = fcport;
		fcport = tfcp;
	} else {
		if (vha->hw->current_topology == ISP_CFG_F)
			fcport->flags |= FCF_FABRIC_DEVICE;

		list_add_tail(&fcport->list, &vha->vp_fcports);
		if (!IS_SW_RESV_ADDR(fcport->d_id))
			vha->fcport_count++;
		fcport->login_gen++;
		fcport->disc_state = DSC_LOGIN_COMPLETE;
		fcport->login_succ = 1;
		newfcport = 1;
	}

	fcport->deleted = 0;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	switch (vha->host->active_mode) {
	case MODE_INITIATOR:
	case MODE_DUAL:
		if (newfcport) {
			if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
				ql_dbg(ql_dbg_disc, vha, 0x20fe,
				   "%s %d %8phC post upd_fcport fcp_cnt %d\n",
				   __func__, __LINE__, fcport->port_name, vha->fcport_count);
				qla24xx_post_upd_fcport_work(vha, fcport);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x20ff,
				   "%s %d %8phC post gpsc fcp_cnt %d\n",
				   __func__, __LINE__, fcport->port_name, vha->fcport_count);
				qla24xx_post_gpsc_work(vha, fcport);
			}
		}
		break;

	case MODE_TARGET:
	default:
		break;
	}
	if (del)
		qla2x00_free_fcport(del);

	return fcport;
}

/* Must be called under tgt_mutex */
static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha,
	uint8_t *s_id)
{
	struct fc_port *sess = NULL;
	fc_port_t *fcport = NULL;
	int rc, global_resets;
	uint16_t loop_id = 0;

	if ((s_id[0] == 0xFF) && (s_id[1] == 0xFC)) {
		/*
		 * This is a Domain Controller, so it should be
		 * OK to drop SCSI commands from it.
		 */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
		    "Unable to find initiator with S_ID %x:%x:%x",
		    s_id[0], s_id[1], s_id[2]);
		return NULL;
	}

	mutex_lock(&vha->vha_tgt.tgt_mutex);

retry:
	global_resets =
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);

	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
	if (rc != 0) {
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		ql_log(ql_log_info, vha, 0xf071,
		    "qla_target(%d): Unable to find "
		    "initiator with S_ID %x:%x:%x",
		    vha->vp_idx, s_id[0], s_id[1],
		    s_id[2]);

		if (rc == -ENOENT) {
			qlt_port_logo_t logo;
			sid_to_portid(s_id, &logo.id);
			logo.cmd_count = 1;
			qlt_send_first_logo(vha, &logo);
		}

		return NULL;
	}

	fcport = qlt_get_port_database(vha, loop_id);
	if (!fcport) {
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		return NULL;
	}

	if (global_resets !=
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
		    "qla_target(%d): global reset during session discovery "
		    "(counter was %d, new %d), retrying", vha->vp_idx,
		    global_resets,
		    atomic_read(&vha->vha_tgt.
			qla_tgt->tgt_global_resets_count));
		goto retry;
	}

	sess = qlt_create_sess(vha, fcport, true);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	return sess;
}

static void qlt_abort_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	unsigned long flags = 0, flags2 = 0;
	uint8_t s_id[3];
	int rc;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags2);

	if (tgt->tgt_stop)
		goto out_term2;

	/* fcp_hdr_le carries the S_ID in little-endian byte order */
	s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
	s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
	s_id[2] = prm->abts.fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */

		spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
		if (!sess)
			goto out_term2;
	} else {
		if (sess->deleted) {
			sess = NULL;
			goto out_term2;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c,
			    "%s: kref_get fail %8phC\n",
			     __func__, sess->port_name);
			sess = NULL;
			goto out_term2;
		}
	}

	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

	if (rc != 0)
		goto out_term;
	return;

out_term2:
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

out_term:
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void qlt_tmr_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct atio_from_isp *a = &prm->tm_iocb2;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	unsigned long flags;
	uint8_t *s_id = NULL; /* to hide compiler warnings */
	int rc;
	u64 unpacked_lun;
	int fn;
	void *iocb;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);

	if (tgt->tgt_stop)
		goto out_term2;

	s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (!sess)
			goto out_term2;
	} else {
		if (sess->deleted) {
			sess = NULL;
			goto out_term2;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020,
			    "%s: kref_get fail %8phC\n",
			     __func__, sess->port_name);
			sess = NULL;
			goto out_term2;
		}
	}

	iocb = a;
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);

	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	if (rc != 0)
		goto out_term;
	return;

out_term2:
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
out_term:
	qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0);
}

static void qlt_sess_work_fn(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
	struct scsi_qla_host *vha = tgt->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		struct qla_tgt_sess_work_param *prm = list_entry(
		    tgt->sess_works_list.next, typeof(*prm),
		    sess_works_list_entry);

		/*
		 * This work can be scheduled on several CPUs at time, so we
		 * must delete the entry to eliminate double processing
		 */
		list_del(&prm->sess_works_list_entry);

		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

		switch (prm->type) {
		case QLA_TGT_SESS_WORK_ABORT:
			qlt_abort_work(tgt, prm);
			break;
		case QLA_TGT_SESS_WORK_TM:
			qlt_tmr_work(tgt, prm);
			break;
		default:
			BUG_ON(1);
			break;
		}

		spin_lock_irqsave(&tgt->sess_work_lock, flags);

		kfree(prm);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
}

/* Must be called under tgt_host_action_mutex */
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
	struct qla_tgt *tgt;

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	if (!IS_TGT_MODE_CAPABLE(ha)) {
		ql_log(ql_log_warn, base_vha, 0xe070,
		    "This adapter does not support target mode.\n");
		return 0;
	}

	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
	    "Registering target for host %ld(%p).\n", base_vha->host_no, ha);

	BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);

	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
	if (!tgt) {
		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
		    "Unable to allocate struct qla_tgt\n");
		return -ENOMEM;
	}

	if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
		base_vha->host->hostt->supported_mode |= MODE_TARGET;

	tgt->ha = ha;
	tgt->vha = base_vha;
	init_waitqueue_head(&tgt->waitQ);
	INIT_LIST_HEAD(&tgt->del_sess_list);
	spin_lock_init(&tgt->sess_work_lock);
	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
	INIT_LIST_HEAD(&tgt->sess_works_list);
	atomic_set(&tgt->tgt_global_resets_count, 0);

	base_vha->vha_tgt.qla_tgt = tgt;

	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
		"qla_target(%d): using 64 Bit PCI addressing",
		base_vha->vp_idx);
	tgt->tgt_enable_64bit_addr = 1;
	/* 3 is reserved */
	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
	tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
	tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;

	mutex_lock(&qla_tgt_mutex);
	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
	mutex_unlock(&qla_tgt_mutex);

	if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
		ha->tgt.tgt_ops->add_target(base_vha);

	return 0;
}

/* Must be called under tgt_host_action_mutex */
int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
{
	if (!vha->vha_tgt.qla_tgt)
		return 0;

	if (vha->fc_vport) {
		qlt_release(vha->vha_tgt.qla_tgt);
		return 0;
	}

	/* free left over qfull cmds */
	qlt_init_term_exchange(vha);

	mutex_lock(&qla_tgt_mutex);
	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
	    vha->host_no, ha);
	qlt_release(vha->vha_tgt.qla_tgt);

	return 0;
}

void qlt_remove_target_resources(struct qla_hw_data *ha)
{
	struct scsi_qla_host *node;
	u32 key = 0;

	btree_for_each_safe32(&ha->tgt.host_map, key, node)
		btree_remove32(&ha->tgt.host_map, key);

	btree_destroy32(&ha->tgt.host_map);
}

static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
	unsigned char *b)
{
	int i;

	pr_debug("qla2xxx HW vha->node_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->node_name[i]);
	pr_debug("\n");
	pr_debug("qla2xxx HW vha->port_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->port_name[i]);
	pr_debug("\n");

	pr_debug("qla2xxx passed configfs WWPN: ");
	put_unaligned_be64(wwpn, b);
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", b[i]);
	pr_debug("\n");
}

/**
 * qla_tgt_lport_register - register lport with external module
 *
 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
 * @phys_wwpn: WWPN of the physical port to bind the lport to
 * @npiv_wwpn: NPIV WWPN, or 0 when registering a physical lport
 * @npiv_wwnn: NPIV WWNN, or 0 when registering a physical lport
 * @callback:  lport initialization callback for tcm_qla2xxx code
 */
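
/*
 * Usage sketch (illustrative only; "my_lport" and "my_lport_cb" are
 * hypothetical fabric-module names, not part of this driver):
 *
 *	static int my_lport_cb(struct scsi_qla_host *vha,
 *		void *target_lport_ptr, u64 npiv_wwpn, u64 npiv_wwnn)
 *	{
 *		// bind target_lport_ptr to vha, set ha->tgt.tgt_ops
 *		return 0;
 *	}
 *
 *	rc = qlt_lport_register(my_lport, wwpn, 0, 0, my_lport_cb);
 *	// rc is -ENODEV when no matching, target-capable qla2xxx host exists
 */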
int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
		       u64 npiv_wwpn, u64 npiv_wwnn,
		       int (*callback)(struct scsi_qla_host *, void *, u64, u64))
{
	struct qla_tgt *tgt;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct Scsi_Host *host;
	unsigned long flags;
	int rc;
	u8 b[WWN_SIZE];

	mutex_lock(&qla_tgt_mutex);
	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
		vha = tgt->vha;
		ha = vha->hw;

		host = vha->host;
		if (!host)
			continue;

		if (!(host->hostt->supported_mode & MODE_TARGET))
			continue;

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		if (tgt->tgt_stop) {
			pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
				 host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		if (!scsi_host_get(host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe068,
			    "Unable to scsi_host_get() for"
			    " qla2xxx scsi_host\n");
			continue;
		}
		qlt_lport_dump(vha, phys_wwpn, b);

		if (memcmp(vha->port_name, b, WWN_SIZE)) {
			scsi_host_put(host);
			continue;
		}
		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
		if (rc != 0)
			scsi_host_put(host);

		mutex_unlock(&qla_tgt_mutex);
		return rc;
	}
	mutex_unlock(&qla_tgt_mutex);

	return -ENODEV;
}
EXPORT_SYMBOL(qlt_lport_register);

/**
 * qla_tgt_lport_deregister - Deregister lport
 *
 * @vha:  Registered scsi_qla_host pointer
 */
void qlt_lport_deregister(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct Scsi_Host *sh = vha->host;
	/*
	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
	 */
	vha->vha_tgt.target_lport_ptr = NULL;
	ha->tgt.tgt_ops = NULL;
	/*
	 * Release the Scsi_Host reference for the underlying qla2xxx host
	 */
	scsi_host_put(sh);
}
EXPORT_SYMBOL(qlt_lport_deregister);
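
/*
 * Summary of the qlini_mode -> scsi_host active_mode mapping applied by
 * qlt_set_mode() (target mode turned on) and qlt_clear_mode() (turned off):
 *
 *	qlini_mode	set_mode	clear_mode
 *	disabled	MODE_TARGET	MODE_UNKNOWN
 *	exclusive	MODE_TARGET	MODE_INITIATOR
 *	enabled		MODE_UNKNOWN	MODE_INITIATOR
 *	dual		MODE_DUAL	MODE_INITIATOR
 */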

/* Must be called under HW lock */
static void qlt_set_mode(struct scsi_qla_host *vha)
{
	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_TARGET;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode = MODE_UNKNOWN;
		break;
	case QLA2XXX_INI_MODE_DUAL:
		vha->host->active_mode = MODE_DUAL;
		break;
	default:
		break;
	}
}

/* Must be called under HW lock */
static void qlt_clear_mode(struct scsi_qla_host *vha)
{
	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
		vha->host->active_mode = MODE_UNKNOWN;
		break;
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
	case QLA2XXX_INI_MODE_DUAL:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	default:
		break;
	}
}

/*
 * qla_tgt_enable_vha - NO LOCK HELD
 *
 * host_reset, bring up w/ Target Mode Enabled
 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	int rspq_ent = QLA83XX_RSPQ_MSIX_ENTRY_NUMBER;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe069,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stopped = 0;
	qlt_set_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

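	/*
	 * NPIV ports can be bounced individually; the base port needs a
	 * full ISP abort so the firmware re-initializes with target mode
	 * enabled.
	 */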
	if (vha->vp_idx) {
		qla24xx_disable_vp(vha);
		qla24xx_enable_vp(vha);
	} else {
		if (ha->msix_entries) {
			ql_dbg(ql_dbg_tgt, vha, 0xe081,
			    "%s: host%ld : vector %d cpu %d\n",
			    __func__, vha->host_no,
			    ha->msix_entries[rspq_ent].vector,
			    ha->msix_entries[rspq_ent].cpuid);

			ha->tgt.rspq_vector_cpuid =
			    ha->msix_entries[rspq_ent].cpuid;
		}

		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
		qla2xxx_wake_dpc(base_vha);
		qla2x00_wait_for_hba_online(base_vha);
	}
}
EXPORT_SYMBOL(qlt_enable_vha);

/*
 * qla_tgt_disable_vha - NO LOCK HELD
 *
 * Disable Target Mode and reset the adapter
 */
static void qlt_disable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_clear_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_hba_online(vha);
}

/*
 * Called from qla_init.c:qla24xx_vport_create() context to set up
 * the target mode specific struct scsi_qla_host and struct qla_hw_data
 * members.
 */
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
	vha->vha_tgt.qla_tgt = NULL;

	mutex_init(&vha->vha_tgt.tgt_mutex);
	mutex_init(&vha->vha_tgt.tgt_host_action_mutex);

	qlt_clear_mode(vha);

	/*
	 * NOTE: Currently the value is kept the same for <24xx and
	 * >=24xx ISPs. If it is necessary to change it,
	 * the check should be added for specific ISPs,
	 * assigning the value appropriately.
	 */
	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;

	qlt_add_target(ha, vha);
}

void
qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
{
	/*
	 * FC-4 Feature bit 0 indicates target functionality to the name server.
	 */
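	/* Bit 1 likewise advertises initiator functionality; dual mode sets both. */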
	if (qla_tgt_mode_enabled(vha)) {
		ct_req->req.rff_id.fc4_feature = BIT_0;
	} else if (qla_ini_mode_enabled(vha)) {
		ct_req->req.rff_id.fc4_feature = BIT_1;
	} else if (qla_dual_mode_enabled(vha))
		ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
}

/*
 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
 * @ha: HA context
 *
 * Beginning of ATIO ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
 */
void
qlt_init_atio_q_entries(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t cnt;
	struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;


	if (qla_ini_mode_enabled(vha))
		return;

	for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
		pkt->u.raw.signature = ATIO_PROCESSED;
		pkt++;
	}

}

/*
 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
 * @ha: SCSI driver HA context
 */
void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *pkt;
	int cnt, i;
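
	/*
	 * Walk the ATIO ring until an entry still owned by the firmware is
	 * found.  Corrupted FCP headers are terminated instead of being
	 * passed up; consumed entries are re-marked ATIO_PROCESSED and the
	 * out-pointer is written back to the chip at the end.
	 */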

	if (!ha->flags.fw_started)
		return;

	while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
	    fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		cnt = pkt->u.raw.entry_count;

		if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
			/*
			 * This packet is corrupted. The header + payload
			 * can not be trusted. There is no point in passing
			 * it further up.
			 */
			ql_log(ql_log_warn, vha, 0xd03c,
			    "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
			    pkt->u.isp24.fcp_hdr.s_id,
			    be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
			    le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);

			adjust_corrupted_atio(pkt);
			qlt_send_term_exchange(vha, NULL, pkt, ha_locked, 0);
		} else {
			qlt_24xx_atio_pkt_all_vps(vha,
			    (struct atio_from_isp *)pkt, ha_locked);
		}

		for (i = 0; i < cnt; i++) {
			ha->tgt.atio_ring_index++;
			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
				ha->tgt.atio_ring_index = 0;
				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
			} else
				ha->tgt.atio_ring_ptr++;

			pkt->u.raw.signature = ATIO_PROCESSED;
			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		}
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
	RD_REG_DWORD_RELAXED(ISP_ATIO_Q_OUT(vha));
}

void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
	RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));

	if (IS_ATIO_MSIX_CAPABLE(ha)) {
		struct qla_msix_entry *msix = &ha->msix_entries[2];
		struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;

		icb->msix_atio = cpu_to_le16(msix->entry);
		ql_dbg(ql_dbg_init, vha, 0xf072,
		    "Registering ICB vector 0x%x for atio que.\n",
		    msix->entry);
	}
}
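
/*
 * NVRAM stage-1 tweaks: when target or dual mode is enabled, the original
 * exchange_count and firmware_options_* words are saved once (saved_set)
 * so they can be restored verbatim if target mode is later disabled.
 */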

void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		if (qla_tgt_mode_enabled(vha))
			nv->exchange_count = cpu_to_le16(0xFFFF);
		else			/* dual */
			nv->exchange_count = cpu_to_le16(ql2xexchoffld);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (qla_tgt_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		if (ql2xtgt_tape_enable)
			/* Enable FC Tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC Tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);

		/*
		 * clear BIT 15 explicitly as we have seen at least
		 * a couple of instances where this was set and this
		 * was causing the firmware to not be initialized.
		 */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);

		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}

void
qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_24xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
	}

	/* disable ZIO at start time. */
	if (!vha->flags.init_done) {
		uint32_t tmp;
		tmp = le32_to_cpu(icb->firmware_options_2);
		tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		icb->firmware_options_2 = cpu_to_le32(tmp);
	}
}

void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		if (qla_tgt_mode_enabled(vha))
			nv->exchange_count = cpu_to_le16(0xFFFF);
		else			/* dual */
			nv->exchange_count = cpu_to_le16(ql2xexchoffld);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (qla_tgt_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);
		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		/*
		 * clear BIT 15 explicitly as we have seen at
		 * least a couple of instances where this was set
		 * and this was causing the firmware to not be
		 * initialized.
		 */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
		if (ql2xtgt_tape_enable)
			/* Enable FC tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}

void
qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_81xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
	}

	/* disable ZIO at start time. */
	if (!vha->flags.init_done) {
		uint32_t tmp;
		tmp = le32_to_cpu(icb->firmware_options_2);
		tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		icb->firmware_options_2 = cpu_to_le32(tmp);
	}

}

void
qlt_83xx_iospace_config(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	ha->msix_count += 1; /* For ATIO Q */
}

int
qlt_24xx_process_response_error(struct scsi_qla_host *vha,
	struct sts_entry_24xx *pkt)
{
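	/*
	 * Returning 1 tells the caller this entry type belongs to target
	 * mode, so the initiator-side error handling should leave it alone.
	 */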
	switch (pkt->entry_type) {
	case ABTS_RECV_24XX:
	case ABTS_RESP_24XX:
	case CTIO_TYPE7:
	case NOTIFY_ACK_TYPE:
	case CTIO_CRC2:
		return 1;
	default:
		return 0;
	}
}

void
qlt_modify_vp_config(struct scsi_qla_host *vha,
	struct vp_config_entry_24xx *vpmod)
{
	/* enable target mode.  Bit5 = 1 => disable */
	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_5;

	/* Disable ini mode, if requested.  bit4 = 1 => disable */
	if (qla_tgt_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_4;
}

void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
	int rc;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
	} else {
		ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
	}

	mutex_init(&base_vha->vha_tgt.tgt_mutex);
	mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);

	INIT_LIST_HEAD(&base_vha->unknown_atio_list);
	INIT_DELAYED_WORK(&base_vha->unknown_atio_work,
	    qlt_unknown_atio_work_fn);

	qlt_clear_mode(base_vha);

	rc = btree_init32(&ha->tgt.host_map);
	if (rc)
		ql_log(ql_log_info, base_vha, 0xd03d,
		    "Unable to initialize ha->host_map btree\n");

	qlt_update_vp_map(base_vha, SET_VP_IDX);
}

irqreturn_t
qla83xx_msix_atio_q(int irq, void *dev_id)
{
	struct rsp_que *rsp;
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	ha = rsp->hw;
	vha = pci_get_drvdata(ha->pdev);

	/*
	 * On 83xx the ATIO queue has its own MSI-X vector; drain it under
	 * atio_lock rather than hardware_lock.
	 */
	spin_lock_irqsave(&ha->tgt.atio_lock, flags);

	qlt_24xx_process_atio_queue(vha, 0);

	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);

	return IRQ_HANDLED;
}

static void
qlt_handle_abts_recv_work(struct work_struct *work)
{
	struct qla_tgt_sess_op *op = container_of(work,
		struct qla_tgt_sess_op, work);
	scsi_qla_host_t *vha = op->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	if (qla2x00_reset_active(vha) || (op->chip_reset != ha->chip_reset)) {
		kfree(op);
		return;
	}

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);
	qlt_24xx_process_atio_queue(vha, 0);
	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_response_pkt_all_vps(vha, (response_t *)&op->atio);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	kfree(op);
}

void
qlt_handle_abts_recv(struct scsi_qla_host *vha, response_t *pkt)
{
	struct qla_tgt_sess_op *op;

	op = kzalloc(sizeof(*op), GFP_ATOMIC);

	if (!op) {
		/* do not reach for ATIO queue here.  This is best effort err
		 * recovery at this point.
		 */
		qlt_response_pkt_all_vps(vha, pkt);
		return;
	}

	memcpy(&op->atio, pkt, sizeof(*pkt));
	op->vha = vha;
	op->chip_reset = vha->hw->chip_reset;
	INIT_WORK(&op->work, qlt_handle_abts_recv_work);
	queue_work(qla_tgt_wq, &op->work);
	return;
}

int
qlt_mem_alloc(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
	    MAX_MULTI_ID_FABRIC, GFP_KERNEL);
	if (!ha->tgt.tgt_vp_map)
		return -ENOMEM;

	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
	    &ha->tgt.atio_dma, GFP_KERNEL);
	if (!ha->tgt.atio_ring) {
		kfree(ha->tgt.tgt_vp_map);
		return -ENOMEM;
	}
	return 0;
}

void
qlt_mem_free(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.atio_ring) {
		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
		    ha->tgt.atio_dma);
	}
	kfree(ha->tgt.tgt_vp_map);
}

/* vport_slock to be held by the caller */
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
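	/*
	 * Two lookup structures are kept in sync here: tgt_vp_map[], indexed
	 * by vp_idx, and the host_map btree, keyed by the 24-bit port ID
	 * (d_id.b24), which routes incoming frames to the owning vha.
	 */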
	void *slot;
	u32 key;
	int rc;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	key = vha->d_id.b24;

	switch (cmd) {
	case SET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
		slot = btree_lookup32(&vha->hw->tgt.host_map, key);
		if (!slot) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018,
			    "Save vha in host_map %p %06x\n", vha, key);
			rc = btree_insert32(&vha->hw->tgt.host_map,
				key, vha, GFP_ATOMIC);
			if (rc)
				ql_log(ql_log_info, vha, 0xd03e,
				    "Unable to insert s_id into host_map: %06x\n",
				    key);
			return;
		}
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
		    "replace existing vha in host_map %p %06x\n", vha, key);
		btree_update32(&vha->hw->tgt.host_map, key, vha);
		break;
	case RESET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
		    "clear vha in host_map %p %06x\n", vha, key);
		slot = btree_lookup32(&vha->hw->tgt.host_map, key);
		if (slot)
			btree_remove32(&vha->hw->tgt.host_map, key);
		vha->d_id.b24 = 0;
		break;
	}
}

void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
{
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->d_id.b24) {
		spin_lock_irqsave(&ha->vport_slock, flags);
		vha->d_id = id;
		qlt_update_vp_map(vha, SET_AL_PA);
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	} else if (vha->d_id.b24 != id.b24) {
		spin_lock_irqsave(&ha->vport_slock, flags);
		qlt_update_vp_map(vha, RESET_AL_PA);
		vha->d_id = id;
		qlt_update_vp_map(vha, SET_AL_PA);
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	}
}

static int __init qlt_parse_ini_mode(void)
{
	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL;
	else
		return false;

	return true;
}

int __init qlt_init(void)
{
	int ret;

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
	    qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xd04b,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep",
	    sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t),
	    0, NULL);

	if (!qla_tgt_plogi_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_plogi_cachep failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_plogi_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_plogi_cachep:
	kmem_cache_destroy(qla_tgt_plogi_cachep);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	return ret;
}

void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_plogi_cachep);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
}