/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_def.h"
#include "qla_target.h"

static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
		"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and then on disabling target mode "
	"enabled back; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"dual\" - initiator mode will be enabled, and target mode can be "
	"activated when ready; "
	"\"enabled\" (default) - initiator mode will always stay enabled.");

static int ql_dm_tgt_ex_pct = 0;
module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql_dm_tgt_ex_pct,
	"For Dual Mode (qlini_mode=dual), this parameter determines "
	"the percentage of exchanges/cmds FW will allocate resources "
	"for Target mode.");

int ql2xuctrlirq = 1;
module_param(ql2xuctrlirq, int, 0644);
MODULE_PARM_DESC(ql2xuctrlirq,
    "User to control IRQ placement via smp_affinity."
    "Valid with qlini_mode=disabled."
    "1(default): enable");

int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

static int qla_sam_status = SAM_STAT_BUSY;
static int tc_sam_status = SAM_STAT_TASK_SET_FULL; /* target core */

/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE      0   /* simple task attribute */
#define FCP_PTA_HEADQ       1   /* head of queue task attribute */
#define FCP_PTA_ORDERED     2   /* ordered task attribute */
#define FCP_PTA_ACA         4   /* auto. contingent allegiance */
#define FCP_PTA_MASK        7   /* mask for task attribute field */
#define FCP_PRI_SHIFT       3   /* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK  0x80        /* reserved bits in priority field */
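
/*
 * Illustrative sketch (not part of the original source): a command handler
 * would typically mask the FCP_CMND task attribute field with FCP_PTA_MASK
 * before dispatching on it, e.g.:
 *
 *	switch (fcp_task_attr & FCP_PTA_MASK) {
 *	case FCP_PTA_HEADQ:	return TCM_HEAD_TAG;
 *	case FCP_PTA_ORDERED:	return TCM_ORDERED_TAG;
 *	case FCP_PTA_ACA:	return TCM_ACA_TAG;
 *	default:		return TCM_SIMPLE_TAG;
 *	}
 *
 * TCM_*_TAG are target-core attribute values; the exact mapping used by
 * this driver lives in its task-attribute helper.
 */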

/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation, at the time when
 * those functions are called:
 *
 *   - Either context is IRQ and only IRQ handler can modify HW data,
 *     including rings related fields,
 *
 *   - Or access to target mode variables from struct qla_tgt doesn't
 *     cross those functions' boundaries, except tgt_stop, which is
 *     additionally protected by irq_cmd_count.
 */
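
/*
 * Sketch of the locking contract described above (illustrative only, not
 * part of the original source):
 *
 *	spin_lock_irqsave(&ha->hardware_lock, flags);
 *	...
 *	qlt_issue_marker(vha, 1);    (may drop and re-take hardware_lock)
 *	...
 *	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 */
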
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt, uint8_t);
static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp,
	response_t *pkt);
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
static void qlt_clear_tgt_db(struct qla_tgt *tgt);
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked);
static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
	fc_port_t *fcport, bool local);
void qlt_unreg_sess(struct fc_port *sess);
static void qlt_24xx_handle_abts(struct scsi_qla_host *,
	struct abts_recv_from_24xx *);
static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
    uint16_t);
static int qlt_check_reserve_free_req(struct qla_qpair *qpair, uint32_t);
static inline uint32_t qlt_make_handle(struct qla_qpair *);

/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
struct kmem_cache *qla_tgt_plogi_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);

static const char *prot_op_str(u32 prot_op)
{
	switch (prot_op) {
	case TARGET_PROT_NORMAL:	return "NORMAL";
	case TARGET_PROT_DIN_INSERT:	return "DIN_INSERT";
	case TARGET_PROT_DOUT_INSERT:	return "DOUT_INSERT";
	case TARGET_PROT_DIN_STRIP:	return "DIN_STRIP";
	case TARGET_PROT_DOUT_STRIP:	return "DOUT_STRIP";
	case TARGET_PROT_DIN_PASS:	return "DIN_PASS";
	case TARGET_PROT_DOUT_PASS:	return "DOUT_PASS";
	default:			return "UNKNOWN";
	}
}

/* This API intentionally takes dest as a parameter, rather than returning
 * an int value, to avoid the caller forgetting to issue wmb() after the store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
	*dest = atomic_inc_return(&base_vha->generation_tick);
	/* memory barrier */
	wmb();
}
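
/*
 * Typical call pattern (illustrative; mirrors the uses further down in this
 * file): the caller stores the tick into a per-session field so that stale
 * deletion requests can be detected later:
 *
 *	qlt_do_generation_tick(vha, &sess->generation);
 *	...
 *	if (max_gen - sess->generation < 0)
 *		return;    (deletion request predates this session)
 */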

/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);
		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}

static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
	uint8_t *d_id)
{
	struct scsi_qla_host *host;
	uint32_t key = 0;

	if ((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) &&
	    (vha->d_id.b.al_pa == d_id[2]))
		return vha;

	key  = (uint32_t)d_id[0] << 16;
	key |= (uint32_t)d_id[1] <<  8;
	key |= (uint32_t)d_id[2];

	host = btree_lookup32(&vha->hw->tgt.host_map, key);
	if (!host)
		ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
		    "Unable to find host %06x\n", key);

	return host;
}
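
/*
 * Worked example of the btree key built above (for illustration): a D_ID of
 * domain:area:al_pa = 01:02:03 packs into key 0x010203, so each remote port
 * address maps to a unique 24-bit lookup key in tgt.host_map.
 */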

static inline
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}

static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);

	vha->hw->tgt.num_pend_cmds++;
	if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
		vha->qla_stats.stat_max_pend_cmds =
			vha->hw->tgt.num_pend_cmds;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}
static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	vha->hw->tgt.num_pend_cmds--;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (tgt->tgt_stop) {
		ql_dbg(ql_dbg_async, vha, 0x502c,
		    "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped",
		    vha->vp_idx);
		goto out_term;
	}

	u = kzalloc(sizeof(*u), GFP_ATOMIC);
	if (u == NULL)
		goto out_term;

	u->vha = vha;
	memcpy(&u->atio, atio, sizeof(*atio));
	INIT_LIST_HEAD(&u->cmd_list);

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	schedule_delayed_work(&vha->unknown_atio_work, 1);

out:
	return;

out_term:
	qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0);
	goto out;
}

static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
	uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u, *t;
	scsi_qla_host_t *host;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	uint8_t queued = 0;

	list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
		if (u->aborted) {
			ql_dbg(ql_dbg_async, vha, 0x502e,
			    "Freeing unknown %s %p, because of Abort\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
			goto abort;
		}

		host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
		if (host != NULL) {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
			    "Requeuing unknown ATIO_TYPE7 %p\n", u);
			qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
		} else if (tgt->tgt_stop) {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a,
			    "Freeing unknown %s %p, because tgt is being stopped\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
		} else {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
			    "Reschedule u %p, vha %p, host %p\n", u, vha, host);
			if (!queued) {
				queued = 1;
				schedule_delayed_work(&vha->unknown_atio_work,
				    1);
			}
			continue;
		}

abort:
		spin_lock_irqsave(&vha->cmd_list_lock, flags);
		list_del(&u->cmd_list);
		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
		kfree(u);
	}
}

void qlt_unknown_atio_work_fn(struct work_struct *work)
{
	struct scsi_qla_host *vha = container_of(to_delayed_work(work),
	    struct scsi_qla_host, unknown_atio_work);

	qlt_try_to_dequeue_unknown_atios(vha, 0);
}
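
/*
 * Flow summary (illustrative): an ATIO whose d_id cannot be matched to a
 * vha is parked on vha->unknown_atio_list by qlt_queue_unknown_atio();
 * this delayed work then retries the lookup roughly every jiffy via
 * qlt_try_to_dequeue_unknown_atios() until the frame can be delivered,
 * terminated, or dropped on tgt_stop.
 */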

static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
		"%s: qla_target(%d): type %x ox_id %04x\n",
		__func__, vha->vp_idx, atio->u.raw.entry_type,
		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id[0],
			    atio->u.isp24.fcp_hdr.d_id[1],
			    atio->u.isp24.fcp_hdr.d_id[2]);

			qlt_queue_unknown_atio(vha, atio, ha_locked);
			break;
		}
		if (unlikely(!list_empty(&vha->unknown_atio_list)))
			qlt_try_to_dequeue_unknown_atios(vha, ha_locked);

		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		qlt_issue_marker(vha, ha_locked);

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case VP_RPT_ID_IOCB_TYPE:
		qla24xx_report_id_acquisition(vha,
			(struct vp_rpt_id_entry_24xx *)atio);
		break;

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
			(struct abts_recv_from_24xx *)atio;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
			entry->vp_index);
		unsigned long flags;

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe00a,
			    "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		if (!ha_locked)
			spin_lock_irqsave(&host->hw->hardware_lock, flags);
		qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
		break;
	}

	/* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return false;
}

void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_CRC2:
		ql_dbg(ql_dbg_tgt, vha, 0xe073,
			"qla_target(%d):%s: CRC2 Response pkt\n",
			vha->vp_idx, __func__);
		/* fall through */
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (0xFF != entry->u.isp24.vp_index) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}
	default:
		qlt_response_pkt(vha, rsp, pkt);
		break;
	}

}

/*
 * All qlt_plogi_ack_t operations are protected by hardware_lock
 */
static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	struct qla_work_evt *e;
	e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.nack.fcport = fcport;
	e->u.nack.type = type;
	memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp));
	return qla2x00_post_work(vha, e);
}

static
void qla2x00_async_nack_sp_done(void *s, int res)
{
	struct srb *sp = (struct srb *)s;
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0x20f2,
	    "Async done-%s res %x %8phC  type %d\n",
	    sp->name, res, sp->fcport->port_name, sp->type);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	sp->fcport->flags &= ~FCF_ASYNC_SENT;
	sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset;

	switch (sp->type) {
	case SRB_NACK_PLOGI:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
		sp->fcport->logout_on_delete = 1;
		sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
		sp->fcport->send_els_logo = 0;
		break;

	case SRB_NACK_PRLI:
		sp->fcport->fw_login_state = DSC_LS_PRLI_COMP;
		sp->fcport->deleted = 0;
		sp->fcport->send_els_logo = 0;

		if (!sp->fcport->login_succ &&
		    !IS_SW_RESV_ADDR(sp->fcport->d_id)) {
			sp->fcport->login_succ = 1;

			vha->fcport_count++;
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
			qla24xx_sched_upd_fcport(sp->fcport);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		} else {
			sp->fcport->login_retry = 0;
			sp->fcport->disc_state = DSC_LOGIN_COMPLETE;
			sp->fcport->deleted = 0;
			sp->fcport->logout_on_delete = 1;
		}
		break;

	case SRB_NACK_LOGO:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
		qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE);
		break;
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp->free(sp);
}

int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	char *c = NULL;

	fcport->flags |= FCF_ASYNC_SENT;
	switch (type) {
	case SRB_NACK_PLOGI:
		fcport->fw_login_state = DSC_LS_PLOGI_PEND;
		c = "PLOGI";
		break;
	case SRB_NACK_PRLI:
		fcport->fw_login_state = DSC_LS_PRLI_PEND;
		fcport->deleted = 0;
		c = "PRLI";
		break;
	case SRB_NACK_LOGO:
		fcport->fw_login_state = DSC_LS_LOGO_PEND;
		c = "LOGO";
		break;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	sp->type = type;
	sp->name = "nack";

	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);

	sp->u.iocb_cmd.u.nack.ntfy = ntfy;
	sp->done = qla2x00_async_nack_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x20f4,
	    "Async-%s %8phC hndl %x %s\n",
	    sp->name, fcport->port_name, sp->handle, c);

	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
	fc_port_t *t;
	unsigned long flags;

	switch (e->u.nack.type) {
	case SRB_NACK_PRLI:
		mutex_lock(&vha->vha_tgt.tgt_mutex);
		t = qlt_create_sess(vha, e->u.nack.fcport, 0);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		if (t) {
			ql_log(ql_log_info, vha, 0xd034,
			    "%s create sess success %p", __func__, t);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			/* create sess has an extra kref */
			vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		}
		break;
	}
	qla24xx_async_notify_ack(vha, e->u.nack.fcport,
	    (struct imm_ntfy_from_isp*)e->u.nack.iocb, e->u.nack.type);
}

void qla24xx_delete_sess_fn(struct work_struct *work)
{
	fc_port_t *fcport = container_of(work, struct fc_port, del_work);
	struct qla_hw_data *ha = fcport->vha->hw;
	unsigned long flags;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);

	if (fcport->se_sess) {
		ha->tgt.tgt_ops->shutdown_sess(fcport);
		ha->tgt.tgt_ops->put_sess(fcport);
	} else {
		qlt_unreg_sess(fcport);
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}

/*
 * Called from qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (fcport->disc_state == DSC_DELETE_PEND) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (!sess->se_sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	} else {
		if (fcport->fw_login_state == DSC_LS_PRLI_COMP) {
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x2107,
			    "%s: kref_get fail sess %8phC \n",
			    __func__, sess->port_name);
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
		    "qla_target(%u): %ssession for port %8phC "
		    "(loop ID %d) reappeared\n", vha->vp_idx,
		    sess->local ? "local " : "", sess->port_name, sess->loop_id);

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
		    "Reappeared sess %p\n", sess);

		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
		    fcport->loop_id,
		    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
	}

	if (sess && sess->local) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name, sess->loop_id);
		sess->local = 0;
	}
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}

/*
 * This is a zero-based ref-counting solution, since hardware_lock
 * guarantees that ref_count is not modified concurrently.
 * Upon successful return, the content of iocb is undefined.
 */
static struct qlt_plogi_ack_t *
qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
		       struct imm_ntfy_from_isp *iocb)
{
	struct qlt_plogi_ack_t *pla;

	list_for_each_entry(pla, &vha->plogi_ack_list, list) {
		if (pla->id.b24 == id->b24) {
			ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d,
			    "%s %d %8phC Term INOT due to new INOT",
			    __func__, __LINE__,
			    pla->iocb.u.isp24.port_name);
			qlt_send_term_imm_notif(vha, &pla->iocb, 1);
			memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
			return pla;
		}
	}

	pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
	if (!pla) {
		ql_dbg(ql_dbg_async, vha, 0x5088,
		       "qla_target(%d): Allocation of plogi_ack failed\n",
		       vha->vp_idx);
		return NULL;
	}

	memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
	pla->id = *id;
	list_add_tail(&pla->list, &vha->plogi_ack_list);

	return pla;
}
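
/*
 * Lifecycle sketch of a qlt_plogi_ack_t (illustrative summary of the code
 * below): qlt_plogi_ack_find_add() creates or reuses the entry,
 * qlt_plogi_ack_link() bumps ref_count for each session pointing at it,
 * and each qlt_plogi_ack_unref() drops one reference; when the count hits
 * zero the deferred NACK is posted and the entry is freed.
 */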

void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
    struct qlt_plogi_ack_t *pla)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	port_id_t port_id;
	uint16_t loop_id;
	fc_port_t *fcport = pla->fcport;

	BUG_ON(!pla->ref_count);
	pla->ref_count--;

	if (pla->ref_count)
		return;

	ql_dbg(ql_dbg_disc, vha, 0x5089,
	    "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
	    " exch %#x ox_id %#x\n", iocb->u.isp24.port_name,
	    iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1],
	    iocb->u.isp24.port_id[0],
	    le16_to_cpu(iocb->u.isp24.nport_handle),
	    iocb->u.isp24.exchange_address, iocb->ox_id);

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area   = iocb->u.isp24.port_id[1];
	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	fcport->loop_id = loop_id;
	fcport->d_id = port_id;
	if (iocb->u.isp24.status_subcode == ELS_PLOGI)
		qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
	else
		qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI);

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
	}

	list_del(&pla->list);
	kmem_cache_free(qla_tgt_plogi_cachep, pla);
}

void
qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
    struct fc_port *sess, enum qlt_plogi_link_t link)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	/* Inc ref_count first because link might already be pointing at pla */
	pla->ref_count++;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
		"Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
		" s_id %02x:%02x:%02x, ref=%d pla %p link %d\n",
		sess, link, sess->port_name,
		iocb->u.isp24.port_name, iocb->u.isp24.port_id[2],
		iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
		pla->ref_count, pla, link);

	if (link == QLT_PLOGI_LINK_CONFLICT) {
		switch (sess->disc_state) {
		case DSC_DELETED:
		case DSC_DELETE_PEND:
			pla->ref_count--;
			return;
		default:
			break;
		}
	}

	if (sess->plogi_link[link])
		qlt_plogi_ack_unref(vha, sess->plogi_link[link]);

	if (link == QLT_PLOGI_LINK_SAME_WWN)
		pla->fcport = sess;

	sess->plogi_link[link] = pla;
}

typedef struct {
	/* These fields must be initialized by the caller */
	port_id_t id;
	/*
	 * Number of cmds dropped while we were waiting for the
	 * initiator to ack LOGO. Initialize to 1 if the LOGO is
	 * triggered by a command; otherwise, to 0.
	 */
	int cmd_count;

	/* These fields are used by callee */
	struct list_head list;
} qlt_port_logo_t;

static void
qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
{
	qlt_port_logo_t *tmp;
	int res;

	mutex_lock(&vha->vha_tgt.tgt_mutex);

	list_for_each_entry(tmp, &vha->logo_list, list) {
		if (tmp->id.b24 == logo->id.b24) {
			tmp->cmd_count += logo->cmd_count;
			mutex_unlock(&vha->vha_tgt.tgt_mutex);
			return;
		}
	}

	list_add_tail(&logo->list, &vha->logo_list);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	list_del(&logo->list);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
	    "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
	    logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
	    logo->cmd_count, res);
}
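
/*
 * Illustrative usage (mirrors qlt_free_session_done() below): the caller
 * fills in the two public fields and hands the structure to
 * qlt_send_first_logo(), which coalesces duplicate LOGOs by d_id:
 *
 *	qlt_port_logo_t logo;
 *
 *	logo.id = sess->d_id;
 *	logo.cmd_count = 0;
 *	qlt_send_first_logo(vha, &logo);
 */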

void qlt_free_session_done(struct work_struct *work)
{
	struct fc_port *sess = container_of(work, struct fc_port,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	bool logout_started = false;
	scsi_qla_host_t *base_vha;
	struct qlt_plogi_ack_t *own =
		sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
		"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
		" s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
		__func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
		sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa,
		sess->logout_on_delete, sess->keep_nport_handle,
		sess->send_els_logo);

	if (!IS_SW_RESV_ADDR(sess->d_id)) {
		if (sess->send_els_logo) {
			qlt_port_logo_t logo;

			logo.id = sess->d_id;
			logo.cmd_count = 0;
			if (!own)
				qlt_send_first_logo(vha, &logo);
			sess->send_els_logo = 0;
		}

		if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
			int rc;

			if (!own ||
			    (own &&
			     (own->iocb.u.isp24.status_subcode == ELS_PLOGI))) {
				rc = qla2x00_post_async_logout_work(vha, sess,
				    NULL);
				if (rc != QLA_SUCCESS)
					ql_log(ql_log_warn, vha, 0xf085,
					    "Schedule logo failed sess %p rc %d\n",
					    sess, rc);
				else
					logout_started = true;
			} else if (own && (own->iocb.u.isp24.status_subcode ==
				ELS_PRLI) && ha->flags.rida_fmt2) {
				rc = qla2x00_post_async_prlo_work(vha, sess,
				    NULL);
				if (rc != QLA_SUCCESS)
					ql_log(ql_log_warn, vha, 0xf085,
					    "Schedule PRLO failed sess %p rc %d\n",
					    sess, rc);
				else
					logout_started = true;
			}
		}
	}

	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	if (logout_started) {
		bool traced = false;

		while (!READ_ONCE(sess->logout_completed)) {
			if (!traced) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
					"%s: waiting for sess %p logout\n",
					__func__, sess);
				traced = true;
			}
			msleep(100);
		}

		ql_dbg(ql_dbg_disc, vha, 0xf087,
		    "%s: sess %p logout completed\n", __func__, sess);
	}

	if (sess->logo_ack_needed) {
		sess->logo_ack_needed = 0;
		qla24xx_async_notify_ack(vha, sess,
			(struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
	}

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (sess->se_sess) {
		sess->se_sess = NULL;
		if (tgt && !IS_SW_RESV_ADDR(sess->d_id))
			tgt->sess_count--;
	}

	sess->disc_state = DSC_DELETED;
	sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
	sess->deleted = QLA_SESS_DELETED;

	if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
		vha->fcport_count--;
		sess->login_succ = 0;
	}

	qla2x00_clear_loop_id(sess);

	if (sess->conflict) {
		sess->conflict->login_pause = 0;
		sess->conflict = NULL;
		if (!test_bit(UNLOADING, &vha->dpc_flags))
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
	}

	{
		struct qlt_plogi_ack_t *con =
		    sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
		struct imm_ntfy_from_isp *iocb;
		own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

		if (con) {
			iocb = &con->iocb;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
				 "se_sess %p / sess %p port %8phC is gone,"
				 " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
				 sess->se_sess, sess, sess->port_name,
				 own ? "releasing own PLOGI" : "no own PLOGI pending",
				 own ? own->ref_count : -1,
				 iocb->u.isp24.port_name, con->ref_count);
			qlt_plogi_ack_unref(vha, con);
			sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
			    "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
			    sess->se_sess, sess, sess->port_name,
			    own ? "releasing own PLOGI" :
			    "no own PLOGI pending",
			    own ? own->ref_count : -1);
		}

		if (own) {
			sess->fw_login_state = DSC_LS_PLOGI_PEND;
			qlt_plogi_ack_unref(vha, own);
			sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		}
	}

	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
	    "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
		sess, sess->port_name, vha->fcport_count);

	if (tgt && (tgt->sess_count == 0))
		wake_up_all(&tgt->waitQ);

	if (vha->fcport_count == 0)
		wake_up_all(&vha->fcport_waitQ);

	base_vha = pci_get_drvdata(ha->pdev);

	sess->free_pending = 0;

	if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags))
		return;

	if ((!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
		switch (vha->host->active_mode) {
		case MODE_INITIATOR:
		case MODE_DUAL:
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			break;
		case MODE_TARGET:
		default:
			/* no-op */
			break;
		}
	}
}

/* ha->tgt.sess_lock supposed to be held on entry */
void qlt_unreg_sess(struct fc_port *sess)
{
	struct scsi_qla_host *vha = sess->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
	    "%s sess %p for deletion %8phC\n",
	    __func__, sess, sess->port_name);

	spin_lock_irqsave(&sess->vha->work_lock, flags);
	if (sess->free_pending) {
		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
		return;
	}
	sess->free_pending = 1;
	spin_unlock_irqrestore(&sess->vha->work_lock, flags);

	if (sess->se_sess)
		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	qla2x00_mark_device_lost(vha, sess, 0, 0);

	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	sess->disc_state = DSC_DELETE_PEND;
	sess->last_rscn_gen = sess->rscn_gen;
	sess->last_login_gen = sess->login_gen;

	if (sess->nvme_flag & NVME_FLAG_REGISTERED &&
	    !(sess->nvme_flag & NVME_FLAG_DELETING)) {
		sess->nvme_flag |= NVME_FLAG_DELETING;
		schedule_work(&sess->nvme_del_work);
	} else {
		INIT_WORK(&sess->free_work, qlt_free_session_done);
		schedule_work(&sess->free_work);
	}
}
EXPORT_SYMBOL(qlt_unreg_sess);

static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	unsigned long flags;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
		/* Global event */
		atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	} else {
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
}

static void qla24xx_chk_fcp_state(struct fc_port *sess)
{
	if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) {
		sess->logout_on_delete = 0;
		sess->logo_ack_needed = 0;
		sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
		sess->scan_state = 0;
	}
}

void qlt_schedule_sess_for_deletion(struct fc_port *sess)
{
	struct qla_tgt *tgt = sess->tgt;
	unsigned long flags;
	u16 sec;

	switch (sess->disc_state) {
	case DSC_DELETE_PEND:
		return;
	case DSC_DELETED:
		if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
			wake_up_all(&tgt->waitQ);
		if (sess->vha->fcport_count == 0)
			wake_up_all(&sess->vha->fcport_waitQ);

		if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
			!sess->plogi_link[QLT_PLOGI_LINK_CONFLICT])
			return;
		break;
	case DSC_UPD_FCPORT:
		/*
		 * This port is not done reporting to the upper layer.
		 * Let it finish.
		 */
		sess->next_disc_state = DSC_DELETE_PEND;
		sec = jiffies_to_msecs(jiffies -
		    sess->jiffies_at_registration)/1000;
		if (sess->sec_since_registration < sec && sec && !(sec % 5)) {
			sess->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
			    "%s %8phC : Slow Rport registration(%d Sec)\n",
			    __func__, sess->port_name, sec);
		}
		return;
	default:
		break;
	}

	spin_lock_irqsave(&sess->vha->work_lock, flags);
	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
		return;
	}
	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	spin_unlock_irqrestore(&sess->vha->work_lock, flags);

	sess->disc_state = DSC_DELETE_PEND;

	qla24xx_chk_fcp_state(sess);

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion %8phC\n",
	    sess, sess->port_name);

	INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
	WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
}

static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
	struct fc_port *sess;
	scsi_qla_host_t *vha = tgt->vha;

	list_for_each_entry(sess, &vha->vp_fcports, list) {
		if (sess->se_sess)
			qlt_schedule_sess_for_deletion(sess);
	}

	/* At this point tgt could be already dead */
}

static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list;
	char *id_iter;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -EBUSY;
		goto out_free_id_list;
	}

	id_iter = (char *)gid_list;
	res = -ENOENT;
	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid = (struct gid_list_info *)id_iter;
		if ((gid->al_pa == s_id[2]) &&
		    (gid->area == s_id[1]) &&
		    (gid->domain == s_id[0])) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		id_iter += ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}

/*
 * Adds an extra ref to allow dropping the hw lock after adding sess to the list.
 * Caller must put it.
 */
static struct fc_port *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (vha->vha_tgt.qla_tgt->tgt_stop)
		return NULL;

	if (fcport->se_sess) {
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f6,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}
		return fcport;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->local = local;

	/*
	 * Under normal circumstances we want to log out from the firmware
	 * when the session eventually ends and release the corresponding
	 * nport handle. In the exception cases (e.g. when a new PLOGI is
	 * waiting) the corresponding code will adjust these flags as
	 * necessary.
	 */
	sess->logout_on_delete = 1;
	sess->keep_nport_handle = 0;
	sess->logout_completed = 0;

	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess) < 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015,
		    "(%d) %8phC check_initiator_node_acl failed\n",
		    vha->vp_idx, fcport->port_name);
		return NULL;
	} else {
		kref_init(&fcport->sess_kref);
		/*
		 * Take an extra reference to ->sess_kref here to handle
		 * fc_port access across ->tgt.sess_lock reacquire.
		 */
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f7,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (!IS_SW_RESV_ADDR(sess->d_id))
			vha->vha_tgt.qla_tgt->sess_count++;

		qlt_do_generation_tick(vha, &sess->generation);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p se_sess %p  to tgt %p sess_count %d\n",
	    sess, sess->se_sess, vha->vha_tgt.qla_tgt,
	    vha->vha_tgt.qla_tgt->sess_count);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ?  "local " : "", fcport->port_name,
	    fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area,
	    sess->d_id.b.al_pa, sess->conf_compl_supported ?  "" : "not ");

	return sess;
}

/*
 * max_gen - specifies the maximum session generation
 * at which this deletion request is still valid
 */
void
qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt)
		return;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}
	if (!sess->se_sess) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}

	if (max_gen - sess->generation < 0) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
		    "Ignoring stale deletion request for se_sess %p / sess %p"
		    " for port %8phC, req_gen %d, sess_gen %d\n",
		    sess->se_sess, sess, sess->port_name, max_gen,
		    sess->generation);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

	sess->local = 1;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
	qlt_schedule_sess_for_deletion(sess);
}

static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;
	int res;
	/*
	 * We need to protect against race, when tgt is freed before or
	 * inside wake_up()
	 */
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
	    "tgt %p, sess_count=%d\n",
	    tgt, tgt->sess_count);
	res = (tgt->sess_count == 0);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	return res;
}

/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&ha->optrom_mutex);
	mutex_lock(&qla_tgt_mutex);

	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		mutex_unlock(&ha->optrom_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we can still get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);

	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);

	/* Big hammer */
	if (!ha->flags.host_shutting_down &&
	    (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
	mutex_unlock(&ha->optrom_mutex);

	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);

/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}
	if (!tgt->tgt_stop) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
		    "%s: phase1 stop is not completed\n", __func__);
		dump_stack();
		return;
	}

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
	    tgt);

	switch (vha->qlini_mode) {
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->flags.online = 1;
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL(qlt_stop_phase2);
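
/*
 * Two-phase stop, summarized (illustrative): phase 1 marks tgt_stop, tears
 * down the session database and waits for outstanding sessions to drain;
 * phase 2 flips the target to tgt_stopped and, for exclusive initiator
 * mode, schedules an ISP abort so initiator mode can come back online.
 */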

/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;
	void *node;
	u64 key = 0;
	u16 i;
	struct qla_qpair_hint *h;
	struct qla_hw_data *ha = vha->hw;

	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stop &&
	    !tgt->tgt_stopped)
		qlt_stop_phase1(tgt);

	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
		unsigned long flags;

		h = &tgt->qphints[i];
		if (h->qpair) {
			spin_lock_irqsave(h->qpair->qp_lock_ptr, flags);
			list_del(&h->hint_elem);
			spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags);
			h->qpair = NULL;
		}
	}
	kfree(tgt->qphints);
	mutex_lock(&qla_tgt_mutex);
	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
		btree_remove64(&tgt->lun_qpair_map, key);

	btree_destroy64(&tgt->lun_qpair_map);

	if (vha->vp_idx)
		if (ha->tgt.tgt_ops &&
		    ha->tgt.tgt_ops->remove_target &&
		    vha->vha_tgt.target_lport_ptr)
			ha->tgt.tgt_ops->remove_target(vha);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire.
 */
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	if (!ha->flags.fw_started)
		return;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE;
	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, qpair->req);
}

static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl, h;
	uint8_t *p;
	int rc;
	struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts;
	struct qla_qpair *qpair = mcmd->qpair;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, status=%x)\n",
	    ha, mcmd->fc_tm_rsp);

	rc = qlt_check_reserve_free_req(qpair, 1);
	if (rc) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate request packet\n",
		    vha->vp_idx, __func__);
		return -EAGAIN;
	}

	resp = (struct abts_resp_to_24xx *)qpair->req->ring_ptr;
	memset(resp, 0, sizeof(*resp));

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else {
		qpair->req->outstanding_cmds[h] = (srb_t *)mcmd;
	}

	resp->handle = MAKE_HANDLE(qpair->req->id, h);
	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;

	resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
	resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
	resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
	resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
	resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
	resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];

	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);

	return rc;
}

/*
 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
 */
static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x)\n",
	    ha, abts, status);

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair,
	    NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->handle = QLA_TGT_SKIP_HANDLE;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
	} else {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
}

/*
 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
 */
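/*
 * Send a TERMINATE EXCHANGE CTIO7 for an exchange whose earlier
 * termination did not stick, then re-send the matching ABTS response.
 */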
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
    struct qla_qpair *qpair, response_t *pkt, struct qla_tgt_mgmt_cmd *mcmd)
{
	struct ctio7_to_24xx *ctio;
	u16 tmp;
	struct abts_recv_from_24xx *entry;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (mcmd)
		/* abts from remote port */
		entry = &mcmd->orig_iocb.abts;
	else
		/* abts from this driver */
		entry = (struct abts_recv_from_24xx *)pkt;

	/*
	 * On entry we have the firmware's response to the ABTS response
	 * we generated earlier, so the ID fields in it are reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);

	if (mcmd) {
		ctio->initiator_id[0] = entry->fcp_hdr_le.s_id[0];
		ctio->initiator_id[1] = entry->fcp_hdr_le.s_id[1];
		ctio->initiator_id[2] = entry->fcp_hdr_le.s_id[2];

		if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID)
			tmp |= (mcmd->abort_io_attr << 9);
		else if (qpair->retry_term_cnt & 1)
			tmp |= (0x4 << 9);
	} else {
		ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
		ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
		ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];

		if (qpair->retry_term_cnt & 1)
			tmp |= (0x4 << 9);
	}
	ctio->u.status1.flags = cpu_to_le16(tmp);
	ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 flags %04xh oxid %04xh attr valid %x\n",
	    le16_to_cpu(ctio->u.status1.flags),
	    le16_to_cpu(ctio->u.status1.ox_id),
	    (mcmd && mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) ? 1 : 0);

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);

	if (mcmd)
		qlt_build_abts_resp_iocb(mcmd);
	else
		qlt_24xx_send_abts_resp(qpair,
		    (struct abts_recv_from_24xx *)entry, FCP_TMF_CMPL, true);
}

/* drop cmds for the given lun
 * XXX only looks for cmds on the port through which lun reset was received
 * XXX does not go through the list of other ports (which may have cmds
 *     for the same lun)
 */
static void abort_cmds_for_lun(struct scsi_qla_host *vha,
			        u64 lun, uint8_t *s_id)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;
	uint32_t key;
	unsigned long flags;

	key = sid_to_key(s_id);
	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
		uint32_t op_key;
		u64 op_lun;

		op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
		op_lun = scsilun_to_int(
			(struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
		if (op_key == key && op_lun == lun)
			op->aborted = true;
	}

	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
		uint32_t op_key;
		u64 op_lun;

		op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
		op_lun = scsilun_to_int(
			(struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
		if (op_key == key && op_lun == lun)
			op->aborted = true;
	}

	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		uint32_t cmd_key;
		u64 cmd_lun;

		cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
		cmd_lun = scsilun_to_int(
			(struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
		if (cmd_key == key && cmd_lun == lun)
			cmd->aborted = 1;
	}
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
}

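/*
 * Map an unpacked LUN to its qpair hint via tgt->lun_qpair_map, falling
 * back to the default hint (qphints[0]) when qpairs are unavailable or
 * the LUN has no mapping yet.
 */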
static struct qla_qpair_hint *qlt_find_qphint(struct scsi_qla_host *vha,
    uint64_t unpacked_lun)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_qpair_hint *h = NULL;

	if (vha->flags.qpairs_available) {
		h = btree_lookup64(&tgt->lun_qpair_map, unpacked_lun);
		if (!h)
			h = &tgt->qphints[0];
	} else {
		h = &tgt->qphints[0];
	}

	return h;
}

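/*
 * Worker that hands a queued task management request to the target core
 * via tgt_ops->handle_tmr() and, if the hand-off fails, sends the
 * appropriate reject, busy, or notify-ack response back itself.
 */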
static void qlt_do_tmr_work(struct work_struct *work)
{
	struct qla_tgt_mgmt_cmd *mcmd =
		container_of(work, struct qla_tgt_mgmt_cmd, work);
	struct qla_hw_data *ha = mcmd->vha->hw;
	int rc = EIO;
	uint32_t tag;
	unsigned long flags;

	switch (mcmd->tmr_func) {
	case QLA_TGT_ABTS:
		tag = mcmd->orig_iocb.abts.exchange_addr_to_abort;
		break;
	default:
		tag = 0;
		break;
	}

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, mcmd->unpacked_lun,
	    mcmd->tmr_func, tag);

	if (rc != 0) {
		spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags);
		switch (mcmd->tmr_func) {
		case QLA_TGT_ABTS:
			mcmd->fc_tm_rsp = FCP_TMF_REJECTED;
			qlt_build_abts_resp_iocb(mcmd);
			break;
		case QLA_TGT_LUN_RESET:
		case QLA_TGT_CLEAR_TS:
		case QLA_TGT_ABORT_TS:
		case QLA_TGT_CLEAR_ACA:
		case QLA_TGT_TARGET_RESET:
			qlt_send_busy(mcmd->qpair, &mcmd->orig_iocb.atio,
			    qla_sam_status);
			break;

		case QLA_TGT_ABORT_ALL:
		case QLA_TGT_NEXUS_LOSS_SESS:
		case QLA_TGT_NEXUS_LOSS:
			qlt_send_notify_ack(mcmd->qpair,
			    &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
			break;
		}
		spin_unlock_irqrestore(mcmd->qpair->qp_lock_ptr, flags);

		ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052,
		    "qla_target(%d):  tgt_ops->handle_tmr() failed: %d\n",
		    mcmd->vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
	}
}

/* ha->hardware_lock is supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct fc_port *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));
	mcmd->cmd_type = TYPE_TGT_TMCMD;
	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->tmr_func = QLA_TGT_ABTS;
	mcmd->qpair = h->qpair;
	mcmd->vha = vha;

	/*
	 * LUN is looked up by target-core internally based on the passed
	 * abts->exchange_addr_to_abort tag.
	 */
	mcmd->se_cmd.cpuid = h->cpuid;

	if (ha->tgt.tgt_ops->find_cmd_by_tag) {
		struct qla_tgt_cmd *abort_cmd;

		abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
		    abts->exchange_addr_to_abort);
		if (abort_cmd && abort_cmd->qpair) {
			mcmd->qpair = abort_cmd->qpair;
			mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
			mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr;
			mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID;
		}
	}

	INIT_WORK(&mcmd->work, qlt_do_tmr_work);
	queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, &mcmd->work);

	return 0;
}

/*
 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	uint32_t tag = abts->exchange_addr_to_abort;
	uint8_t s_id[3];
	int rc;
	unsigned long flags;

	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
	    abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	s_id[0] = abts->fcp_hdr_le.s_id[2];
	s_id[1] = abts->fcp_hdr_le.s_id[1];
	s_id[2] = abts->fcp_hdr_le.s_id[0];

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existent session\n",
		    vha->vp_idx);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
			    false);
		return;
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	if (sess->deleted) {
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}

	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}
}

/*
 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
 */
static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
	struct scsi_qla_host *ha = mcmd->vha;
	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
	struct ctio7_to_24xx *ctio;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt, ha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x)\n",
	    ha, atio, resp_code);

	ctio = (struct ctio7_to_24xx *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", ha->vp_idx, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = mcmd->sess->loop_id;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = ha->vp_idx;
	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) |
		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
	ctio->u.status1.flags = cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(temp);
	ctio->u.status1.scsi_status =
	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
	ctio->u.status1.response_len = cpu_to_le16(8);
	ctio->u.status1.sense_data[0] = resp_code;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(ha, qpair->req);
}

void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);

/*
 * ha->hardware_lock is supposed to be held on entry. Might drop it, then
 * reacquire.
 */
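/*
 * Send a status-only CTIO7 that carries inline fixed-format (0x70)
 * sense data built from the given sense key/ASC/ASCQ, bypassing the
 * target core.
 */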
void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
    uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
{
	struct atio_from_isp *atio = &cmd->atio;
	struct ctio7_to_24xx *ctio;
	uint16_t temp;
	struct scsi_qla_host *vha = cmd->vha;

	ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
	    "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
	    "sense_key=%02x, asc=%02x, ascq=%02x)",
	    vha, atio, scsi_status, sense_key, asc, ascq);

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (!ctio) {
		ql_dbg(ql_dbg_async, vha, 0x3067,
		    "qla2x00t(%ld): %s failed: unable to allocate request packet",
		    vha->host_no, __func__);
		goto out;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE;
	ctio->nport_handle = cmd->sess->loop_id;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) |
	    CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
	ctio->u.status1.flags = cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(temp);
	ctio->u.status1.scsi_status =
	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status);
	ctio->u.status1.response_len = cpu_to_le16(18);
	ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));

	if (ctio->u.status1.residual != 0)
		ctio->u.status1.scsi_status |=
		    cpu_to_le16(SS_RESIDUAL_UNDER);

	/* Response code and sense key */
	put_unaligned_le32(((0x70 << 24) | (sense_key << 8)),
	    (&ctio->u.status1.sense_data)[0]);
	/* Additional sense length */
	put_unaligned_le32(0x0a, (&ctio->u.status1.sense_data)[1]);
	/* ASC and ASCQ */
	put_unaligned_le32(((asc << 24) | (ascq << 16)),
	    (&ctio->u.status1.sense_data)[3]);

	/* Memory Barrier */
	wmb();

	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);

out:
	return;
}

/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct qla_qpair *qpair = mcmd->qpair;
	bool free_mcmd = true;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);

	if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) {
		/*
		 * Either the port is not online or this request was from
		 * previous life, just abort the processing.
		 */
		ql_dbg(ql_dbg_async, vha, 0xe100,
			"RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
			vha->flags.online, qla2x00_reset_active(vha),
			mcmd->reset_count, qpair->chip_reset);
		ha->tgt.tgt_ops->free_mcmd(mcmd);
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
		return;
	}

	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
		if (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
		    ELS_LOGO ||
		    mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
		    ELS_PRLO ||
		    mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
		    ELS_TPRLO) {
			ql_dbg(ql_dbg_disc, vha, 0x2106,
			    "TM response logo %phC status %#x state %#x",
			    mcmd->sess->port_name, mcmd->fc_tm_rsp,
			    mcmd->flags);
			qlt_schedule_sess_for_deletion(mcmd->sess);
		} else {
			qlt_send_notify_ack(vha->hw->base_qpair,
			    &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
		}
	} else {
		if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) {
			qlt_build_abts_resp_iocb(mcmd);
			free_mcmd = false;
		} else
			qlt_24xx_send_task_mgmt_ctio(qpair, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1.  The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here.
	 */
	if (free_mcmd)
		ha->tgt.tgt_ops->free_mcmd(mcmd);

	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);

/* No locks */
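/*
 * DMA-map the command's data (and, for DIF, protection) scatterlists
 * and work out how many request entries and DSDs the transfer needs.
 */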
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = pci_map_sg(cmd->qpair->pdev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
		/*
		 * If greater than four sg entries then we need to allocate
		 * the continuation entries
		 */
		if (prm->seg_cnt > QLA_TGT_DATASEGS_PER_CMD_24XX)
			prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
			QLA_TGT_DATASEGS_PER_CMD_24XX,
			QLA_TGT_DATASEGS_PER_CONT_24XX);
	} else {
		/* DIF */
		if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
		    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
			prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
			prm->tot_dsds = prm->seg_cnt;
		} else
			prm->tot_dsds = prm->seg_cnt;

		if (cmd->prot_sg_cnt) {
			prm->prot_sg      = cmd->prot_sg;
			prm->prot_seg_cnt = pci_map_sg(cmd->qpair->pdev,
				cmd->prot_sg, cmd->prot_sg_cnt,
				cmd->dma_data_direction);
			if (unlikely(prm->prot_seg_cnt == 0))
				goto out_err;

			if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
			    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
				/* DIF bundling is not supported here */
				prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
								cmd->blk_sz);
				prm->tot_dsds += prm->prot_seg_cnt;
			} else
				prm->tot_dsds += prm->prot_seg_cnt;
		}
	}

	return 0;

out_err:
	ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}

static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	if (!cmd->sg_mapped)
		return;

	qpair = cmd->qpair;

	pci_unmap_sg(qpair->pdev, cmd->sg, cmd->sg_cnt,
	    cmd->dma_data_direction);
	cmd->sg_mapped = 0;

	if (cmd->prot_sg_cnt)
		pci_unmap_sg(qpair->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
			cmd->dma_data_direction);

	if (!cmd->ctx)
		return;
	ha = vha->hw;
	if (cmd->ctx_dsd_alloced)
		qla2x00_clean_dsd_pool(ha, cmd->ctx);

	dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
}

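/*
 * Reserve req_cnt entries on the qpair's request ring, re-reading the
 * firmware's out pointer to refresh the free count (two entries are
 * always kept in reserve) before failing with -EAGAIN.
 */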
static int qlt_check_reserve_free_req(struct qla_qpair *qpair,
	uint32_t req_cnt)
{
	uint32_t cnt;
	struct req_que *req = qpair->req;

	if (req->cnt < (req_cnt + 2)) {
		cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (unlikely(req->cnt < (req_cnt + 2)))
			return -EAGAIN;
	}

	req->cnt -= req_cnt;

	return 0;
}

/*
 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
 */
static inline void *qlt_get_req_pkt(struct req_que *req)
{
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}
	return (cont_entry_t *)req->ring_ptr;
}

/* ha->hardware_lock is supposed to be held on entry */
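/*
 * Find a free slot in the outstanding-commands table, scanning from
 * just past the last handle issued; returns QLA_TGT_NULL_HANDLE when
 * the table is full.
 */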
static inline uint32_t qlt_make_handle(struct qla_qpair *qpair)
{
	uint32_t h;
	int index;
	uint8_t found = 0;
	struct req_que *req = qpair->req;

	h = req->current_outstanding_cmd;

	for (index = 1; index < req->num_outstanding_cmds; index++) {
		h++;
		if (h == req->num_outstanding_cmds)
			h = 1;

		if (h == QLA_TGT_SKIP_HANDLE)
			continue;

		if (!req->outstanding_cmds[h]) {
			found = 1;
			break;
		}
	}

	if (found) {
		req->current_outstanding_cmd = h;
	} else {
		ql_dbg(ql_dbg_io, qpair->vha, 0x305b,
		    "qla_target(%d): Ran out of empty cmd slots\n",
		    qpair->vha->vp_idx);
		h = QLA_TGT_NULL_HANDLE;
	}

	return h;
}

/* ha->hardware_lock is supposed to be held on entry */
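/*
 * Build the base CTIO7 IOCB for prm->cmd on the qpair's request ring
 * and register the command under a freshly allocated handle.
 */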
static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
	struct qla_tgt_prm *prm)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct atio_from_isp *atio = &prm->cmd->atio;
	uint16_t temp;

	pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = prm->cmd->vp_idx;

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;

	pkt->handle = MAKE_HANDLE(qpair->req->id, h);
	pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
	temp = atio->u.isp24.attr << 9;
	pkt->u.status0.flags |= cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.ox_id = cpu_to_le16(temp);
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	return 0;
}

/*
 * ha->hardware_lock is supposed to be held on entry. We have already made
 * sure that there is a sufficient number of request entries to not drop it.
 */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
{
	int cnt;
	uint32_t *dword_ptr;

	/* Build continuation packets */
	while (prm->seg_cnt > 0) {
		cont_a64_entry_t *cont_pkt64 =
			(cont_a64_entry_t *)qlt_get_req_pkt(
			   prm->cmd->qpair->req);

		/*
		 * Make sure that none of cont_pkt64's 64-bit specific
		 * fields are used for 32-bit addressing. Cast to
		 * (cont_entry_t *) for that.
		 */

		memset(cont_pkt64, 0, sizeof(*cont_pkt64));

		cont_pkt64->entry_count = 1;
		cont_pkt64->sys_define = 0;

		cont_pkt64->entry_type = CONTINUE_A64_TYPE;
		dword_ptr = (uint32_t *)&cont_pkt64->dseg_0_address;

		/* Load continuation entry data segments */
		for (cnt = 0;
		    cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt;
		    cnt++, prm->seg_cnt--) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_lo32
				(sg_dma_address(prm->sg)));
			*dword_ptr++ = cpu_to_le32(pci_dma_hi32
			    (sg_dma_address(prm->sg)));
			*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

			prm->sg = sg_next(prm->sg);
		}
	}
}

/*
 * ha->hardware_lock is supposed to be held on entry. We have already made
 * sure that there is a sufficient number of request entries to not drop it.
 */
static void qlt_load_data_segments(struct qla_tgt_prm *prm)
{
	int cnt;
	uint32_t *dword_ptr;
	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

	/* Setup packet address segment pointer */
	dword_ptr = pkt24->u.status0.dseg_0_address;

	/* Set total data segment count */
	if (prm->seg_cnt)
		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

	if (prm->seg_cnt == 0) {
		/* No data transfer */
		*dword_ptr++ = 0;
		*dword_ptr = 0;
		return;
	}

	/* If scatter gather */

	/* Load command entry data segments */
	for (cnt = 0;
	    (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt;
	    cnt++, prm->seg_cnt--) {
		*dword_ptr++ =
		    cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));

		*dword_ptr++ = cpu_to_le32(pci_dma_hi32(
			sg_dma_address(prm->sg)));

		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

		prm->sg = sg_next(prm->sg);
	}

	qlt_load_cont_data_segments(prm);
}

static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
{
	return cmd->bufflen > 0;
}

static void qlt_print_dif_err(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd;
	struct scsi_qla_host *vha;

	/* asc 0x10=dif error */
	if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) {
		cmd = prm->cmd;
		vha = cmd->vha;
		/* ASCQ */
		switch (prm->sense_buffer[13]) {
		case 1:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b,
			    "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		case 2:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c,
			    "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		case 3:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f,
			    "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		default:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe010,
			    "BE detected Dif ERR: lba[%llx|%lld] len[%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		}
		ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16);
	}
}

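/*
 * Prepare the qla_tgt_prm descriptor for a response: map data if
 * needed, fold residual under/overflow into the SCSI status, and count
 * how many ring entries the response will take.
 */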
/*
 * Called without ha->hardware_lock held
 */
static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
	uint32_t *full_req_cnt)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct qla_qpair *qpair = cmd->qpair;

	prm->cmd = cmd;
	prm->tgt = cmd->tgt;
	prm->pkt = NULL;
	prm->rq_result = scsi_status;
	prm->sense_buffer = &cmd->sense_buffer[0];
	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
	prm->sg = NULL;
	prm->seg_cnt = -1;
	prm->req_cnt = 1;
	prm->residual = 0;
	prm->add_status_pkt = 0;
	prm->prot_sg = NULL;
	prm->prot_seg_cnt = 0;
	prm->tot_dsds = 0;

	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
		if (qlt_pci_map_calc_cnt(prm) != 0)
			return -EAGAIN;
	}

	*full_req_cnt = prm->req_cnt;

	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg_qp(ql_dbg_io + ql_dbg_verbose, qpair, 0x305c,
		    "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
		       prm->residual, se_cmd->tag,
		       se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		       cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_UNDER;
	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg_qp(ql_dbg_io, qpair, 0x305d,
		    "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
		       prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
		       se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_OVER;
	}

	if (xmit_type & QLA_TGT_XMIT_STATUS) {
		/*
		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
		 * ignored in *xmit_response() below
		 */
		if (qlt_has_data(cmd)) {
			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
			    (IS_FWI2_CAPABLE(cmd->vha->hw) &&
			    (prm->rq_result != 0))) {
				prm->add_status_pkt = 1;
				(*full_req_cnt)++;
			}
		}
	}

	return 0;
}

static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd,
    int sending_sense)
{
	if (cmd->qpair->enable_class_2)
		return 0;

	if (sending_sense)
		return cmd->conf_compl_supported;
	else
		return cmd->qpair->enable_explicit_conf &&
		    cmd->conf_compl_supported;
}

static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
	struct qla_tgt_prm *prm)
{
	prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
	    (uint32_t)sizeof(ctio->u.status1.sense_data));
	ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
	if (qlt_need_explicit_conf(prm->cmd, 0)) {
		ctio->u.status0.flags |= cpu_to_le16(
		    CTIO7_FLAGS_EXPLICIT_CONFORM |
		    CTIO7_FLAGS_CONFORM_REQ);
	}
	ctio->u.status0.residual = cpu_to_le32(prm->residual);
	ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
	if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
		int i;

		if (qlt_need_explicit_conf(prm->cmd, 1)) {
			if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) {
				ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe017,
				    "Skipping EXPLICIT_CONFORM and "
				    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
				    "non GOOD status\n");
				goto skip_explicit_conf;
			}
			ctio->u.status1.flags |= cpu_to_le16(
			    CTIO7_FLAGS_EXPLICIT_CONFORM |
			    CTIO7_FLAGS_CONFORM_REQ);
		}
skip_explicit_conf:
		ctio->u.status1.flags &=
		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.scsi_status |=
		    cpu_to_le16(SS_SENSE_LEN_VALID);
		ctio->u.status1.sense_length =
		    cpu_to_le16(prm->sense_buffer_len);
		for (i = 0; i < prm->sense_buffer_len/4; i++)
			((uint32_t *)ctio->u.status1.sense_data)[i] =
				cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);

		qlt_print_dif_err(prm);

	} else {
		ctio->u.status1.flags &=
		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.sense_length = 0;
		memset(ctio->u.status1.sense_data, 0,
		    sizeof(ctio->u.status1.sense_data));
	}

	/* Sense with len > 24, is it possible ??? */
}

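/*
 * Decide whether HBA-side DIF error checking applies to this protection
 * op: always for DIN_INSERT/DOUT_STRIP, gated by the ql2xenablehba_err_chk
 * module parameter for the other op types.
 */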
static inline int
qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		if (ql2xenablehba_err_chk >= 1)
			return 1;
		break;
	case TARGET_PROT_DOUT_PASS:
	case TARGET_PROT_DIN_PASS:
		if (ql2xenablehba_err_chk >= 2)
			return 1;
		break;
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		return 1;
	default:
		break;
	}
	return 0;
}

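/* Protection ops for which the REF tag mask should stay enabled. */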
static inline int
qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_STRIP:
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
	    return 1;
	default:
	    return 0;
	}
	return 0;
}

/*
 * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
 */
static void
qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
    uint16_t *pfw_prot_opts)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
	scsi_qla_host_t *vha = cmd->tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	uint32_t t32 = 0;

	/*
	 * Wait till the Mode Sense/Select cmd, modepage Ah, subpage 2
	 * has been implemented by TCM, before the AppTag is available.
	 * Look for modesense_handlers[].
	 */
	ctx->app_tag = 0;
	ctx->app_tag_mask[0] = 0x0;
	ctx->app_tag_mask[1] = 0x0;

	if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
			*pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
			*pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
	}

	t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);

	switch (se_cmd->prot_type) {
	case TARGET_DIF_TYPE0_PROT:
		/*
		 * No check for ql2xenablehba_err_chk, as it
		 * would be an I/O error if hba tag generation
		 * is not done.
		 */
		ctx->ref_tag = cpu_to_le32(lba);
		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	case TARGET_DIF_TYPE1_PROT:
	    /*
	     * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
	     * REF tag, and 16 bit app tag.
	     */
	    ctx->ref_tag = cpu_to_le32(lba);
	    if (!qla_tgt_ref_mask_check(se_cmd) ||
		!(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
		    *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
		    break;
	    }
	    /* enable ALL bytes of the ref tag */
	    ctx->ref_tag_mask[0] = 0xff;
	    ctx->ref_tag_mask[1] = 0xff;
	    ctx->ref_tag_mask[2] = 0xff;
	    ctx->ref_tag_mask[3] = 0xff;
	    break;
	case TARGET_DIF_TYPE2_PROT:
	    /*
	     * For TYPE 2 protection: 16 bit GUARD + 32 bit REF
	     * tag has to match LBA in CDB + N
	     */
	    ctx->ref_tag = cpu_to_le32(lba);
	    if (!qla_tgt_ref_mask_check(se_cmd) ||
		!(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
		    *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
		    break;
	    }
	    /* enable ALL bytes of the ref tag */
	    ctx->ref_tag_mask[0] = 0xff;
	    ctx->ref_tag_mask[1] = 0xff;
	    ctx->ref_tag_mask[2] = 0xff;
	    ctx->ref_tag_mask[3] = 0xff;
	    break;
	case TARGET_DIF_TYPE3_PROT:
	    /* For TYPE 3 protection: 16 bit GUARD only */
	    *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
	    ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
		ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
	    break;
	}
}

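/*
 * Build a CTIO_CRC2 IOCB for a T10-PI protected command: compute the
 * transfer and DIF byte counts, pick the firmware protection options,
 * allocate a CRC context, and walk the data/protection scatterlists
 * into DSDs.
 */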
static inline int
qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
{
	uint32_t		*cur_dsd;
	uint32_t		transfer_length = 0;
	uint32_t		data_bytes;
	uint32_t		dif_bytes;
	uint8_t			bundling = 1;
	struct crc_context	*crc_ctx_pkt = NULL;
	struct qla_hw_data	*ha;
	struct ctio_crc2_to_fw	*pkt;
	dma_addr_t		crc_ctx_dma;
	uint16_t		fw_prot_opts = 0;
	struct qla_tgt_cmd	*cmd = prm->cmd;
	struct se_cmd		*se_cmd = &cmd->se_cmd;
	uint32_t h;
	struct atio_from_isp *atio = &prm->cmd->atio;
	struct qla_tc_param	tc;
	uint16_t t16;
	scsi_qla_host_t *vha = cmd->vha;

	ha = vha->hw;

	pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071,
		"qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
		cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op,
		prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);

	if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
	    (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
		bundling = 0;

	/* Compute dif len and adjust data len to include protection */
	data_bytes = cmd->bufflen;
	dif_bytes  = (data_bytes / cmd->blk_sz) * 8;

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		transfer_length = data_bytes;
		if (cmd->prot_sg_cnt)
			data_bytes += dif_bytes;
		break;
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		transfer_length = data_bytes + dif_bytes;
		break;
	default:
		BUG();
		break;
	}

	if (!qlt_hba_err_chk_enabled(se_cmd))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
			fw_prot_opts |= PO_DIS_VALD_APP_ESC;
		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
			fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
	}

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_INSERT:
		fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_STRIP:
		fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		fw_prot_opts |= PO_MODE_DIF_PASS;
		/* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
		break;
	default: /* Normal Request */
		fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	/* ---- PKT ---- */
	/* Update entry type to indicate Command Type CRC_2 IOCB */
	pkt->entry_type  = CTIO_CRC2;
	pkt->entry_count = 1;
	pkt->vp_index = cmd->vp_idx;

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;

	pkt->handle  = MAKE_HANDLE(qpair->req->id, h);
	pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr   = atio->u.isp24.exchange_addr;

	/* silence compile warning */
	t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->ox_id  = cpu_to_le16(t16);

	t16 = (atio->u.isp24.attr << 9);
	pkt->flags |= cpu_to_le16(t16);
	pkt->relative_offset = cpu_to_le32(prm->cmd->offset);

	/* Set transfer direction */
	if (cmd->dma_data_direction == DMA_TO_DEVICE)
		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
	else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);

	pkt->dseg_count = prm->tot_dsds;
	/* Fibre channel byte count */
	pkt->transfer_length = cpu_to_le32(transfer_length);

	/* ----- CRC context -------- */

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = cmd->ctx =
	    dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	/* Set handle */
	crc_ctx_pkt->handle = pkt->handle;

	qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);

	pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count =
			cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size   = cpu_to_le16(cmd->blk_sz);
	crc_ctx_pkt->prot_opts  = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);

	memset((uint8_t *)&tc, 0 , sizeof(tc));
	tc.vha = vha;
	tc.blk_sz = cmd->blk_sz;
	tc.bufflen = cmd->bufflen;
	tc.sg = cmd->sg;
	tc.prot_sg = cmd->prot_sg;
	tc.ctx = crc_ctx_pkt;
	tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;

	/* Walks data segments */
	pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);

	if (!bundling && prm->prot_seg_cnt) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
			prm->tot_dsds, &tc))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
		(prm->tot_dsds - prm->prot_seg_cnt), &tc))
		goto crc_queuing_error;

	if (bundling && prm->prot_seg_cnt) {
		/* Walks dif segments */
		pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;

		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
			prm->prot_seg_cnt, &tc))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */
	qpair->req->outstanding_cmds[h] = NULL;

	return QLA_FUNCTION_FAILED;
}

/*
 * Callback to set up a response of xmit_type QLA_TGT_XMIT_DATA and/or
 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
 */
int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
	uint8_t scsi_status)
{
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_qpair *qpair = cmd->qpair;
	struct ctio7_to_24xx *pkt;
	struct qla_tgt_prm prm;
	uint32_t full_req_cnt = 0;
	unsigned long flags = 0;
	int res;

	if (cmd->sess && cmd->sess->deleted) {
		cmd->state = QLA_TGT_STATE_PROCESSED;
		if (cmd->sess->logout_completed)
			/* no need to terminate. FW already freed exchange. */
			qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
		else
			qlt_send_term_exchange(qpair, cmd, &cmd->atio, 0, 0);
		return 0;
	}

	ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n",
	    (xmit_type & QLA_TGT_XMIT_STATUS) ?
	    1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
	    &cmd->se_cmd, qpair->id);

	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
	    &full_req_cnt);
	if (unlikely(res != 0)) {
		return res;
	}

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);

	if (xmit_type == QLA_TGT_XMIT_STATUS)
		qpair->tgt_counters.core_qla_snd_status++;
	else
		qpair->tgt_counters.core_qla_que_buf++;

	if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) {
		/*
		 * Either the port is not online or this request was from
		 * previous life, just abort the processing.
		 */
		cmd->state = QLA_TGT_STATE_PROCESSED;
		qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
		ql_dbg_qp(ql_dbg_async, qpair, 0xe101,
			"RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
			vha->flags.online, qla2x00_reset_active(vha),
			cmd->reset_count, qpair->chip_reset);
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
		return 0;
	}

	/* Does the F/W have enough IOCBs for this request? */
	res = qlt_check_reserve_free_req(qpair, full_req_cnt);
	if (unlikely(res))
		goto out_unmap_unlock;

	if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
		res = qlt_build_ctio_crc2_pkt(qpair, &prm);
	else
		res = qlt_24xx_build_ctio_pkt(qpair, &prm);
	if (unlikely(res != 0)) {
		qpair->req->cnt += full_req_cnt;
		goto out_unmap_unlock;
	}

	pkt = (struct ctio7_to_24xx *)prm.pkt;

	if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
		pkt->u.status0.flags |=
		    cpu_to_le16(CTIO7_FLAGS_DATA_IN |
			CTIO7_FLAGS_STATUS_MODE_0);

		if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
			qlt_load_data_segments(&prm);

		if (prm.add_status_pkt == 0) {
			if (xmit_type & QLA_TGT_XMIT_STATUS) {
				pkt->u.status0.scsi_status =
				    cpu_to_le16(prm.rq_result);
				pkt->u.status0.residual =
				    cpu_to_le32(prm.residual);
				pkt->u.status0.flags |= cpu_to_le16(
				    CTIO7_FLAGS_SEND_STATUS);
				if (qlt_need_explicit_conf(cmd, 0)) {
					pkt->u.status0.flags |=
					    cpu_to_le16(
						CTIO7_FLAGS_EXPLICIT_CONFORM |
						CTIO7_FLAGS_CONFORM_REQ);
				}
			}

		} else {
			/*
			 * We have already made sure that there is a sufficient
			 * number of request entries to not drop the HW lock in
			 * req_pkt().
			 */
			struct ctio7_to_24xx *ctio =
				(struct ctio7_to_24xx *)qlt_get_req_pkt(
				    qpair->req);

			ql_dbg_qp(ql_dbg_tgt, qpair, 0x305e,
			    "Building additional status packet 0x%p.\n",
			    ctio);

			/*
			 * T10Dif: ctio_crc2_to_fw overlay on top of
			 * ctio7_to_24xx
			 */
			memcpy(ctio, pkt, sizeof(*ctio));
			/* reset back to CTIO7 */
			ctio->entry_count = 1;
			ctio->entry_type = CTIO_TYPE7;
			ctio->dseg_count = 0;
			ctio->u.status1.flags &= ~cpu_to_le16(
			    CTIO7_FLAGS_DATA_IN);

			/* Real finish is ctio_m1's finish */
			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
			pkt->u.status0.flags |= cpu_to_le16(
			    CTIO7_FLAGS_DONT_RET_CTIO);

			/* qlt_24xx_init_ctio_to_isp will correct
			 * all necessary fields that are part of CTIO7.
			 * There should be no residual of CTIO-CRC2 data.
			 */
			qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
			    &prm);
		}
	} else
		qlt_24xx_init_ctio_to_isp(pkt, &prm);


	cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
	spin_lock(&cmd->cmd_lock);
	cmd->cmd_sent_to_fw = 1;
	spin_unlock(&cmd->cmd_lock);
	cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return 0;

out_unmap_unlock:
	qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return res;
}
EXPORT_SYMBOL(qlt_xmit_response);

int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
{
	struct ctio7_to_24xx *pkt;
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_tgt *tgt = cmd->tgt;
	struct qla_tgt_prm prm;
	unsigned long flags = 0;
	int res = 0;
	struct qla_qpair *qpair = cmd->qpair;

	memset(&prm, 0, sizeof(prm));
	prm.cmd = cmd;
	prm.tgt = tgt;
	prm.sg = NULL;
	prm.req_cnt = 1;

	/* Calculate number of entries and segments required */
	if (qlt_pci_map_calc_cnt(&prm) != 0)
		return -EAGAIN;

	if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
	    (cmd->sess && cmd->sess->deleted)) {
		/*
		 * Either the port is not online or this request was from
		 * previous life, just abort the processing.
		 */
		cmd->state = QLA_TGT_STATE_NEED_DATA;
		qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
		ql_dbg_qp(ql_dbg_async, qpair, 0xe102,
			"RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
			vha->flags.online, qla2x00_reset_active(vha),
			cmd->reset_count, qpair->chip_reset);
		return 0;
	}

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	/* Does the F/W have enough IOCBs for this request? */
	res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
	if (res != 0)
		goto out_unlock_free_unmap;
	if (cmd->se_cmd.prot_op)
		res = qlt_build_ctio_crc2_pkt(qpair, &prm);
	else
		res = qlt_24xx_build_ctio_pkt(qpair, &prm);

	if (unlikely(res != 0)) {
		qpair->req->cnt += prm.req_cnt;
		goto out_unlock_free_unmap;
	}

	pkt = (struct ctio7_to_24xx *)prm.pkt;
	pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
	    CTIO7_FLAGS_STATUS_MODE_0);

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
		qlt_load_data_segments(&prm);

	cmd->state = QLA_TGT_STATE_NEED_DATA;
	spin_lock(&cmd->cmd_lock);
	cmd->cmd_sent_to_fw = 1;
	spin_unlock(&cmd->cmd_lock);
	cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return res;

out_unlock_free_unmap:
	qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return res;
}
EXPORT_SYMBOL(qlt_rdy_to_xfer);

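/*
 * On a CTIO_DIF_ERROR completion the firmware reports two 8-byte DIF
 * tuples (actual vs. expected), each laid out big-endian as
 * guard(16) | app_tag(16) | ref_tag(32); qlt_handle_dif_error() below
 * unpacks them to decide which of the three tags mismatched.
 */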

/*
 * It is assumed either hardware_lock or qpair lock is held.
 */
static void
qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
	struct ctio_crc_from_fw *sts)
{
	uint8_t		*ap = &sts->actual_dif[0];
	uint8_t		*ep = &sts->expected_dif[0];
	uint64_t	lba = cmd->se_cmd.t_task_lba;
	uint8_t scsi_status, sense_key, asc, ascq;
	unsigned long flags;
	struct scsi_qla_host *vha = cmd->vha;

	cmd->trc_flags |= TRC_DIF_ERR;

	cmd->a_guard   = be16_to_cpu(*(uint16_t *)(ap + 0));
	cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
	cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));

	cmd->e_guard   = be16_to_cpu(*(uint16_t *)(ep + 0));
	cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
	cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
	    "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);

	scsi_status = sense_key = asc = ascq = 0;

	/* check appl tag */
	if (cmd->e_app_tag != cmd->a_app_tag) {
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d,
		    "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
		    cmd->atio.u.isp24.fcp_hdr.ox_id);

		cmd->dif_err_code = DIF_ERR_APP;
		scsi_status = SAM_STAT_CHECK_CONDITION;
		sense_key = ABORTED_COMMAND;
		asc = 0x10;
		ascq = 0x2;
	}

	/* check ref tag */
	if (cmd->e_ref_tag != cmd->a_ref_tag) {
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e,
		    "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
		    cmd->atio.u.isp24.fcp_hdr.ox_id);

		cmd->dif_err_code = DIF_ERR_REF;
		scsi_status = SAM_STAT_CHECK_CONDITION;
		sense_key = ABORTED_COMMAND;
		asc = 0x10;
		ascq = 0x3;
		goto out;
	}

	/* check guard */
	if (cmd->e_guard != cmd->a_guard) {
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe012,
		    "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
		    cmd->atio.u.isp24.fcp_hdr.ox_id);

		cmd->dif_err_code = DIF_ERR_GRD;
		scsi_status = SAM_STAT_CHECK_CONDITION;
		sense_key = ABORTED_COMMAND;
		asc = 0x10;
		ascq = 0x1;
	}
out:
	switch (cmd->state) {
	case QLA_TGT_STATE_NEED_DATA:
		/* handle_data will load DIF error code */
		cmd->state = QLA_TGT_STATE_DATA_IN;
		vha->hw->tgt.tgt_ops->handle_data(cmd);
		break;
	default:
		spin_lock_irqsave(&cmd->cmd_lock, flags);
		if (cmd->aborted) {
			spin_unlock_irqrestore(&cmd->cmd_lock, flags);
			vha->hw->tgt.tgt_ops->free_cmd(cmd);
			break;
		}
		spin_unlock_irqrestore(&cmd->cmd_lock, flags);

		qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc,
		    ascq);
		/* Assume the SCSI status gets out on the wire.
		 * Will not wait for completion.
		 */
		vha->hw->tgt.tgt_ops->free_cmd(cmd);
		break;
	}
}

/* If hardware_lock held on entry, might drop it, then reacquire */
/* This function sends a terminating Notify Ack to the ISP 24xx */
static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy)
{
	struct nack_to_isp *nack;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;

	ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
	    "Sending TERM ELS CTIO (ha=%p)\n", ha);

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (pkt == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe080,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			__constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}

	/* terminate */
	nack->u.isp24.flags |=
		__constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);

	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	qla2x00_start_iocbs(vha, vha->req);
	return ret;
}

static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked)
{
	unsigned long flags = 0;
	int rc;

	if (ha_locked) {
		rc = __qlt_send_term_imm_notif(vha, imm);

#if 0	/* Todo */
		if (rc == -ENOMEM)
			qlt_alloc_qfull_cmd(vha, imm, 0, 0);
#else
		if (rc) {
		}
#endif
		goto done;
	}

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	rc = __qlt_send_term_imm_notif(vha, imm);

#if 0	/* Todo */
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, imm, 0, 0);
#endif

done:
	if (!ha_locked)
		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}

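/*
 * __qlt_send_term_exchange() asks the firmware to tear down an exchange
 * without returning status: it builds a CTIO7 with CTIO7_FLAGS_TERMINATE
 * and a skip handle. It returns 1 when a command at or past
 * QLA_TGT_STATE_PROCESSED was terminated, 0 otherwise, and -ENOMEM when
 * no IOCB space was available.
 */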
/*
 * If hardware_lock held on entry, might drop it, then reacquire.
 * This function sends the appropriate CTIO to the ISP 2xxx or 24xx.
 */
static int __qlt_send_term_exchange(struct qla_qpair *qpair,
	struct qla_tgt_cmd *cmd,
	struct atio_from_isp *atio)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);

	if (cmd)
		vha = cmd->vha;

	pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL);
	if (pkt == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe050,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	if (cmd != NULL) {
		if (cmd->state < QLA_TGT_STATE_PROCESSED) {
			ql_dbg(ql_dbg_tgt, vha, 0xe051,
			    "qla_target(%d): Terminating cmd %p with "
			    "incorrect state %d\n", vha->vp_idx, cmd,
			    cmd->state);
		} else
			ret = 1;
	}

	qpair->tgt_counters.num_term_xchg_sent++;
	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE;
	ctio24->u.status1.flags = cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio24->u.status1.ox_id = cpu_to_le16(temp);

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	return ret;
}

static void qlt_send_term_exchange(struct qla_qpair *qpair,
	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
	int ul_abort)
{
	struct scsi_qla_host *vha;
	unsigned long flags = 0;
	int rc;

	/* why use different vha? NPIV */
	if (cmd)
		vha = cmd->vha;
	else
		vha = qpair->vha;

	if (ha_locked) {
		rc = __qlt_send_term_exchange(qpair, cmd, atio);
		if (rc == -ENOMEM)
			qlt_alloc_qfull_cmd(vha, atio, 0, 0);
		goto done;
	}
	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	rc = __qlt_send_term_exchange(qpair, cmd, atio);
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, atio, 0, 0);

done:
	if (cmd && !ul_abort && !cmd->aborted) {
		if (cmd->sg_mapped)
			qlt_unmap_sg(vha, cmd);
		vha->hw->tgt.tgt_ops->free_cmd(cmd);
	}

	if (!ha_locked)
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return;
}

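/*
 * qlt_init_term_exchange() flushes the queue-full backlog (those commands
 * never reached the target core, so they are freed directly) and re-arms
 * the exchange-leak threshold as a percentage of the firmware exchange
 * count (LEAK_EXCHG_THRESH_HOLD_PERCENT of cur_fw_xcb_count).
 */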
static void qlt_init_term_exchange(struct scsi_qla_host *vha)
{
	struct list_head free_list;
	struct qla_tgt_cmd *cmd, *tcmd;

	vha->hw->tgt.leak_exchg_thresh_hold =
	    (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;

	cmd = tcmd = NULL;
	if (!list_empty(&vha->hw->tgt.q_full_list)) {
		INIT_LIST_HEAD(&free_list);
		list_splice_init(&vha->hw->tgt.q_full_list, &free_list);

		list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
			list_del(&cmd->cmd_list);
			/* This cmd was never sent to TCM.  There is no need
			 * to schedule free or call free_cmd
			 */
			qlt_free_cmd(cmd);
			vha->hw->tgt.num_qfull_cmds_alloc--;
		}
	}
	vha->hw->tgt.num_qfull_cmds_dropped = 0;
}

static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
{
	uint32_t total_leaked;

	total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;

	if (vha->hw->tgt.leak_exchg_thresh_hold &&
	    (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {

		ql_dbg(ql_dbg_tgt, vha, 0xe079,
		    "Chip reset due to exchange starvation: %d/%d.\n",
		    total_leaked, vha->hw->cur_fw_xcb_count);

		if (IS_P3P_TYPE(vha->hw))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
	}

}

int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
{
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
	    "qla_target(%d): terminating exchange for aborted cmd=%p "
	    "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
	    se_cmd->tag);

	spin_lock_irqsave(&cmd->cmd_lock, flags);
	if (cmd->aborted) {
		spin_unlock_irqrestore(&cmd->cmd_lock, flags);
		/*
		 * It's normal to see 2 calls in this path:
		 *  1) XFER Rdy completion + CMD_T_ABORT
		 *  2) TCM TMR - drain_state_list
		 */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016,
		    "multiple abort. %p transport_state %x, t_state %x, "
		    "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
		    cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
		return EIO;
	}
	cmd->aborted = 1;
	cmd->trc_flags |= TRC_ABORT;
	spin_unlock_irqrestore(&cmd->cmd_lock, flags);

	qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1);
	return 0;
}
EXPORT_SYMBOL(qlt_abort_cmd);

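/*
 * qlt_free_cmd() is the final teardown of a qla_tgt_cmd: the command must
 * no longer be queued on a workqueue or have its scatterlist DMA-mapped
 * (both are BUG_ON'd below) before its tag is returned to the session's
 * target-core tag pool.
 */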
void qlt_free_cmd(struct qla_tgt_cmd *cmd)
{
	struct fc_port *sess = cmd->sess;

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
	    "%s: se_cmd[%p] ox_id %04x\n",
	    __func__, &cmd->se_cmd,
	    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));

	BUG_ON(cmd->cmd_in_wq);

	if (cmd->sg_mapped)
		qlt_unmap_sg(cmd->vha, cmd);

	if (!cmd->q_full)
		qlt_decr_num_pend_cmds(cmd->vha);

	BUG_ON(cmd->sg_mapped);
	cmd->jiffies_at_free = get_jiffies_64();
	if (unlikely(cmd->free_sg))
		kfree(cmd->sg);

	if (!sess || !sess->se_sess) {
		WARN_ON(1);
		return;
	}
	target_free_tag(sess->se_sess, &cmd->se_cmd);
}
EXPORT_SYMBOL(qlt_free_cmd);

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire.
 */
static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
	struct qla_tgt_cmd *cmd, uint32_t status)
{
	int term = 0;
	struct scsi_qla_host *vha = qpair->vha;

	if (cmd->se_cmd.prot_op)
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe013,
		    "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
		    "se_cmd=%p tag[%x] op %#x/%s",
		     cmd->lba, cmd->lba,
		     cmd->num_blks, &cmd->se_cmd,
		     cmd->atio.u.isp24.exchange_addr,
		     cmd->se_cmd.prot_op,
		     prot_op_str(cmd->se_cmd.prot_op));

	if (ctio != NULL) {
		struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
		term = !(c->flags &
		    cpu_to_le16(OF_TERM_EXCH));
	} else
		term = 1;

	if (term)
		qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);

	return term;
}


/* ha->hardware_lock supposed to be held on entry */
static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
	struct rsp_que *rsp, uint32_t handle, void *ctio)
{
	void *cmd = NULL;
	struct req_que *req;
	int qid = GET_QID(handle);
	uint32_t h = handle & ~QLA_TGT_HANDLE_MASK;

	if (unlikely(h == QLA_TGT_SKIP_HANDLE))
		return NULL;

	if (qid == rsp->req->id) {
		req = rsp->req;
	} else if (vha->hw->req_q_map[qid]) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a,
		    "qla_target(%d): CTIO completion with different QID %d handle %x\n",
		    vha->vp_idx, rsp->id, handle);
		req = vha->hw->req_q_map[qid];
	} else {
		return NULL;
	}

	h &= QLA_CMD_HANDLE_MASK;

	if (h != QLA_TGT_NULL_HANDLE) {
		if (unlikely(h >= req->num_outstanding_cmds)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe052,
			    "qla_target(%d): Wrong handle %x received\n",
			    vha->vp_idx, handle);
			return NULL;
		}

		cmd = (void *) req->outstanding_cmds[h];
		if (unlikely(cmd == NULL)) {
			ql_dbg(ql_dbg_async, vha, 0xe053,
			    "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
				vha->vp_idx, handle, req->id, rsp->id);
			return NULL;
		}
		req->outstanding_cmds[h] = NULL;
	} else if (ctio != NULL) {
		/* We can't get loop ID from CTIO7 */
		ql_dbg(ql_dbg_tgt, vha, 0xe054,
		    "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
		    "support NULL handles\n", vha->vp_idx);
		return NULL;
	}

	return cmd;
}

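/*
 * qlt_abort_cmd_on_host_reset() unwinds a command stranded by an adapter
 * reset: a command still waiting for write data is completed back to the
 * target core with write_data_transferred = 0, anything else is freed.
 */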
/* hardware_lock should be held by caller. */
void
qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha = vha->hw;

	if (cmd->sg_mapped)
		qlt_unmap_sg(vha, cmd);

	/* TODO: fix debug message type and ids. */
	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
		ql_dbg(ql_dbg_io, vha, 0xff00,
		    "HOST-ABORT: state=PROCESSED.\n");
	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		cmd->write_data_transferred = 0;
		cmd->state = QLA_TGT_STATE_DATA_IN;

		ql_dbg(ql_dbg_io, vha, 0xff01,
		    "HOST-ABORT: state=DATA_IN.\n");

		ha->tgt.tgt_ops->handle_data(cmd);
		return;
	} else {
		ql_dbg(ql_dbg_io, vha, 0xff03,
		    "HOST-ABORT: state=BAD(%d).\n",
		    cmd->state);
		dump_stack();
	}

	cmd->trc_flags |= TRC_FLUSH;
	ha->tgt.tgt_ops->free_cmd(cmd);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire.
 */
static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
    struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd;
	struct qla_tgt_cmd *cmd;
	struct qla_qpair *qpair = rsp->qpair;

	if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
		/* That could happen only in case of an error/reset/abort */
		if (status != CTIO_SUCCESS) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
			    "Intermediate CTIO received"
			    " (status %x)\n", status);
		}
		return;
	}

	cmd = (struct qla_tgt_cmd *)qlt_ctio_to_cmd(vha, rsp, handle, ctio);
	if (cmd == NULL)
		return;

	se_cmd = &cmd->se_cmd;
	cmd->cmd_sent_to_fw = 0;

	qlt_unmap_sg(vha, cmd);

	if (unlikely(status != CTIO_SUCCESS)) {
		switch (status & 0xFFFF) {
		case CTIO_INVALID_RX_ID:
			if (printk_ratelimit())
				dev_info(&vha->hw->pdev->dev,
				    "qla_target(%d): CTIO with INVALID_RX_ID ATIO attr %x CTIO Flags %x|%x\n",
				    vha->vp_idx, cmd->atio.u.isp24.attr,
				    ((cmd->ctio_flags >> 9) & 0xf),
				    cmd->ctio_flags);

			break;
		case CTIO_LIP_RESET:
		case CTIO_TARGET_RESET:
		case CTIO_ABORTED:
			/* driver request abort via Terminate exchange */
		case CTIO_TIMEOUT:
			/* They are OK */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
			    "qla_target(%d): CTIO with "
			    "status %#x received, state %x, se_cmd %p, "
			    "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
			    "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
			    status, cmd->state, se_cmd);
			break;

		case CTIO_PORT_LOGGED_OUT:
		case CTIO_PORT_UNAVAILABLE:
		{
			int logged_out =
				(status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
			    "qla_target(%d): CTIO with %s status %x "
			    "received (state %x, se_cmd %p)\n", vha->vp_idx,
			    logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
			    status, cmd->state, se_cmd);

			if (logged_out && cmd->sess) {
				/*
				 * Session is already logged out, but we need
				 * to notify initiator, who's not aware of this
				 */
				cmd->sess->send_els_logo = 1;
				ql_dbg(ql_dbg_disc, vha, 0x20f8,
				    "%s %d %8phC post del sess\n",
				    __func__, __LINE__, cmd->sess->port_name);

				qlt_schedule_sess_for_deletion(cmd->sess);
			}
			break;
		}
		case CTIO_DIF_ERROR: {
			struct ctio_crc_from_fw *crc =
				(struct ctio_crc_from_fw *)ctio;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
			    "qla_target(%d): CTIO with DIF_ERROR status %x "
			    "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
			    "expect_dif[0x%llx]\n",
			    vha->vp_idx, status, cmd->state, se_cmd,
			    *((u64 *)&crc->actual_dif[0]),
			    *((u64 *)&crc->expected_dif[0]));

			qlt_handle_dif_error(qpair, cmd, ctio);
			return;
		}
		default:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
			    "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p)\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			break;
		}


		/* "cmd->aborted" means
		 * cmd is already aborted/terminated, we don't
		 * need to terminate again.  The exchange is already
		 * cleaned up/freed at FW level.  Just cleanup at driver
		 * level.
		 */
		if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
		    (!cmd->aborted)) {
			cmd->trc_flags |= TRC_CTIO_ERR;
			if (qlt_term_ctio_exchange(qpair, ctio, cmd, status))
				return;
		}
	}

	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
		cmd->trc_flags |= TRC_CTIO_DONE;
	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		cmd->state = QLA_TGT_STATE_DATA_IN;

		if (status == CTIO_SUCCESS)
			cmd->write_data_transferred = 1;

		ha->tgt.tgt_ops->handle_data(cmd);
		return;
	} else if (cmd->aborted) {
		cmd->trc_flags |= TRC_CTIO_ABORTED;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
		  "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
	} else {
		cmd->trc_flags |= TRC_CTIO_STRANGE;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
		    "qla_target(%d): A command in state (%d) should "
		    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
	}

	if (unlikely(status != CTIO_SUCCESS) &&
		!cmd->aborted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
		dump_stack();
	}

	ha->tgt.tgt_ops->free_cmd(cmd);
}

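/*
 * qlt_get_fcp_task_attr() maps the FCP_CMND task attribute from the ATIO
 * onto the corresponding TCM task attribute; unknown codes are logged and
 * degraded to ORDERED.
 */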
static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
	uint8_t task_codes)
{
	int fcp_task_attr;

	switch (task_codes) {
	case ATIO_SIMPLE_QUEUE:
		fcp_task_attr = TCM_SIMPLE_TAG;
		break;
	case ATIO_HEAD_OF_QUEUE:
		fcp_task_attr = TCM_HEAD_TAG;
		break;
	case ATIO_ORDERED_QUEUE:
		fcp_task_attr = TCM_ORDERED_TAG;
		break;
	case ATIO_ACA_QUEUE:
		fcp_task_attr = TCM_ACA_TAG;
		break;
	case ATIO_UNTAGGED:
		fcp_task_attr = TCM_SIMPLE_TAG;
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
		    "qla_target: unknown task code %x, use ORDERED instead\n",
		    task_codes);
		fcp_task_attr = TCM_ORDERED_TAG;
		break;
	}

	return fcp_task_attr;
}

static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *,
					uint8_t *);
/*
 * Process context for I/O path into tcm_qla2xxx code
 */
static void __qlt_do_work(struct qla_tgt_cmd *cmd)
{
	scsi_qla_host_t *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = cmd->sess;
	struct atio_from_isp *atio = &cmd->atio;
	unsigned char *cdb;
	unsigned long flags;
	uint32_t data_length;
	int ret, fcp_task_attr, data_dir, bidi = 0;
	struct qla_qpair *qpair = cmd->qpair;

	cmd->cmd_in_wq = 0;
	cmd->trc_flags |= TRC_DO_WORK;

	if (cmd->aborted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
		    "cmd with tag %u is aborted\n",
		    cmd->atio.u.isp24.exchange_addr);
		goto out_term;
	}

	spin_lock_init(&cmd->cmd_lock);
	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
	cmd->se_cmd.tag = atio->u.isp24.exchange_addr;

	if (atio->u.isp24.fcp_cmnd.rddata &&
	    atio->u.isp24.fcp_cmnd.wrdata) {
		bidi = 1;
		data_dir = DMA_TO_DEVICE;
	} else if (atio->u.isp24.fcp_cmnd.rddata)
		data_dir = DMA_FROM_DEVICE;
	else if (atio->u.isp24.fcp_cmnd.wrdata)
		data_dir = DMA_TO_DEVICE;
	else
		data_dir = DMA_NONE;

	fcp_task_attr = qlt_get_fcp_task_attr(vha,
	    atio->u.isp24.fcp_cmnd.task_attr);
	data_length = get_datalen_for_atio(atio);

	ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
				          fcp_task_attr, data_dir, bidi);
	if (ret != 0)
		goto out_term;
	/*
	 * Drop extra session reference from qlt_handle_cmd_for_atio().
	 */
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	return;

out_term:
	ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
	/*
	 * cmd has not been sent to the target yet, so pass NULL as the second
	 * argument to qlt_send_term_exchange() and free the memory here.
	 */
	cmd->trc_flags |= TRC_DO_WORK_ERR;
	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);

	qlt_decr_num_pend_cmds(vha);
	target_free_tag(sess->se_sess, &cmd->se_cmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}

static void qlt_do_work(struct work_struct *work)
{
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
	scsi_qla_host_t *vha = cmd->vha;
	unsigned long flags;

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_del(&cmd->cmd_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	__qlt_do_work(cmd);
}

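/*
 * qlt_clr_qp_table() drops every cached lun -> qpair hint so that
 * qlt_assign_qpair() can rebuild the map after the admin changes the
 * number of active qpairs.
 */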
void qlt_clr_qp_table(struct scsi_qla_host *vha)
{
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	void *node;
	u64 key = 0;

	ql_log(ql_log_info, vha, 0x706c,
	    "User update Number of Active Qpairs %d\n",
	    ha->tgt.num_act_qpairs);

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);

	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
		btree_remove64(&tgt->lun_qpair_map, key);

	ha->base_qpair->lun_cnt = 0;
	for (key = 0; key < ha->max_qpairs; key++)
		if (ha->queue_pair_map[key])
			ha->queue_pair_map[key]->lun_cnt = 0;

	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
}

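/*
 * qlt_assign_qpair() pins each LUN to a qpair through the lun_qpair_map
 * btree; a LUN seen for the first time is placed on the least-loaded
 * qpair (smallest lun_cnt) so the LUN-to-qpair ratio stays even.
 */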
static void qlt_assign_qpair(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd)
{
	struct qla_qpair *qpair, *qp;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_qpair_hint *h;

	if (vha->flags.qpairs_available) {
		h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun);
		if (unlikely(!h)) {
			/* spread lun to qpair ratio evenly */
			int lcnt = 0, rc;
			struct scsi_qla_host *base_vha =
				pci_get_drvdata(vha->hw->pdev);

			qpair = vha->hw->base_qpair;
			if (qpair->lun_cnt == 0) {
				qpair->lun_cnt++;
				h = qla_qpair_to_hint(tgt, qpair);
				BUG_ON(!h);
				rc = btree_insert64(&tgt->lun_qpair_map,
					cmd->unpacked_lun, h, GFP_ATOMIC);
				if (rc) {
					qpair->lun_cnt--;
					ql_log(ql_log_info, vha, 0xd037,
					    "Unable to insert lun %llx into lun_qpair_map\n",
					    cmd->unpacked_lun);
				}
				goto out;
			} else {
				lcnt = qpair->lun_cnt;
			}

			h = NULL;
			list_for_each_entry(qp, &base_vha->qp_list,
			    qp_list_elem) {
				if (qp->lun_cnt == 0) {
					qp->lun_cnt++;
					h = qla_qpair_to_hint(tgt, qp);
					BUG_ON(!h);
					rc = btree_insert64(&tgt->lun_qpair_map,
					    cmd->unpacked_lun, h, GFP_ATOMIC);
					if (rc) {
						qp->lun_cnt--;
						ql_log(ql_log_info, vha, 0xd038,
							"Unable to insert lun %llx into lun_qpair_map\n",
							cmd->unpacked_lun);
					}
					qpair = qp;
					goto out;
				} else {
					if (qp->lun_cnt < lcnt) {
						lcnt = qp->lun_cnt;
						qpair = qp;
						continue;
					}
				}
			}
			BUG_ON(!qpair);
			qpair->lun_cnt++;
			h = qla_qpair_to_hint(tgt, qpair);
			BUG_ON(!h);
			rc = btree_insert64(&tgt->lun_qpair_map,
				cmd->unpacked_lun, h, GFP_ATOMIC);
			if (rc) {
				qpair->lun_cnt--;
				ql_log(ql_log_info, vha, 0xd039,
				   "Unable to insert lun %llx into lun_qpair_map\n",
				   cmd->unpacked_lun);
			}
		}
	} else {
		h = &tgt->qphints[0];
	}
out:
	cmd->qpair = h->qpair;
	cmd->se_cmd.cpuid = h->cpuid;
}

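/*
 * qlt_get_tag() carves a qla_tgt_cmd out of the session's pre-allocated
 * tag pool and seeds it from the incoming ATIO; it returns NULL when the
 * pool is exhausted, which the caller maps to -EBUSY.
 */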
static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
				       struct fc_port *sess,
				       struct atio_from_isp *atio)
{
	struct se_session *se_sess = sess->se_sess;
	struct qla_tgt_cmd *cmd;
	int tag, cpu;

	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
	if (tag < 0)
		return NULL;

	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
	memset(cmd, 0, sizeof(struct qla_tgt_cmd));
	cmd->cmd_type = TYPE_TGT_CMD;
	memcpy(&cmd->atio, atio, sizeof(*atio));
	cmd->state = QLA_TGT_STATE_NEW;
	cmd->tgt = vha->vha_tgt.qla_tgt;
	qlt_incr_num_pend_cmds(vha);
	cmd->vha = vha;
	cmd->se_cmd.map_tag = tag;
	cmd->se_cmd.map_cpu = cpu;
	cmd->sess = sess;
	cmd->loop_id = sess->loop_id;
	cmd->conf_compl_supported = sess->conf_compl_supported;

	cmd->trc_flags = 0;
	cmd->jiffies_at_alloc = get_jiffies_64();

	cmd->unpacked_lun = scsilun_to_int(
	    (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
	qlt_assign_qpair(vha, cmd);
	cmd->reset_count = vha->hw->base_qpair->chip_reset;
	cmd->vp_idx = vha->vp_idx;

	return cmd;
}

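/*
 * qlt_handle_cmd_for_atio() is the entry point for a new SCSI command:
 * it validates the session, takes a session reference, allocates a tagged
 * command and queues qlt_do_work() on a CPU derived from the command's
 * qpair hint.
 */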
/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess;
	struct qla_tgt_cmd *cmd;
	unsigned long flags;
	port_id_t id;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_io, vha, 0x3061,
		    "New command while device %p is shutting down\n", tgt);
		return -ENODEV;
	}

	id.b.al_pa = atio->u.isp24.fcp_hdr.s_id[2];
	id.b.area = atio->u.isp24.fcp_hdr.s_id[1];
	id.b.domain = atio->u.isp24.fcp_hdr.s_id[0];
	if (IS_SW_RESV_ADDR(id))
		return -EBUSY;

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
	if (unlikely(!sess))
		return -EFAULT;

	/* Another WWN used to have our s_id. Our PLOGI scheduled its
	 * session deletion, but it's still in sess_del_work wq */
	if (sess->deleted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
		    "New command while old session %p is being deleted\n",
		    sess);
		return -EFAULT;
	}

	/*
	 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
	 */
	if (!kref_get_unless_zero(&sess->sess_kref)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
		    "%s: kref_get fail, %8phC oxid %x \n",
		    __func__, sess->port_name,
		     be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
		return -EFAULT;
	}

	cmd = qlt_get_tag(vha, sess, atio);
	if (!cmd) {
		ql_dbg(ql_dbg_io, vha, 0x3062,
		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		ha->tgt.tgt_ops->put_sess(sess);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return -EBUSY;
	}

	cmd->cmd_in_wq = 1;
	cmd->trc_flags |= TRC_NEW_CMD;

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	INIT_WORK(&cmd->work, qlt_do_work);
	if (vha->flags.qpairs_available) {
		queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
	} else if (ha->msix_count) {
		if (cmd->atio.u.isp24.fcp_cmnd.rddata)
			queue_work_on(smp_processor_id(), qla_tgt_wq,
			    &cmd->work);
		else
			queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
			    &cmd->work);
	} else {
		queue_work(qla_tgt_wq, &cmd->work);
	}

	return 0;
}

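/*
 * qlt_issue_task_mgmt() builds a qla_tgt_mgmt_cmd for the requested TMF.
 * LUN-scoped functions (LUN reset, clear/abort task set) first mark the
 * affected commands queued in the driver as aborted, then the TMF is
 * handed to qlt_do_tmr_work() on the qpair that owns the LUN.
 */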
/* ha->hardware_lock supposed to be held on entry */
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags)
{
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (!mcmd) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
		    "qla_target(%d): Allocation of management "
		    "command failed, some commands and their data could "
		    "leak\n", vha->vp_idx);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));
	mcmd->sess = sess;

	if (iocb) {
		memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
		    sizeof(mcmd->orig_iocb.imm_ntfy));
	}
	mcmd->tmr_func = fn;
	mcmd->flags = flags;
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->qpair = h->qpair;
	mcmd->vha = vha;
	mcmd->se_cmd.cpuid = h->cpuid;
	mcmd->unpacked_lun = lun;

	switch (fn) {
	case QLA_TGT_LUN_RESET:
	case QLA_TGT_CLEAR_TS:
	case QLA_TGT_ABORT_TS:
		abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
		/* fall through */
	case QLA_TGT_CLEAR_ACA:
		h = qlt_find_qphint(vha, mcmd->unpacked_lun);
		mcmd->qpair = h->qpair;
		mcmd->se_cmd.cpuid = h->cpuid;
		break;

	case QLA_TGT_TARGET_RESET:
	case QLA_TGT_NEXUS_LOSS_SESS:
	case QLA_TGT_NEXUS_LOSS:
	case QLA_TGT_ABORT_ALL:
	default:
		/* no-op */
		break;
	}

	INIT_WORK(&mcmd->work, qlt_do_tmr_work);
	queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq,
	    &mcmd->work);

	return 0;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	u64 unpacked_lun;
	int fn;
	unsigned long flags;

	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    a->u.isp24.fcp_hdr.s_id);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);

	if (sess == NULL || sess->deleted)
		return -EFAULT;

	return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
}

/* ha->hardware_lock supposed to be held on entry */
static int __qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb, struct fc_port *sess)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	u64 unpacked_lun;
	int rc;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
	    sizeof(mcmd->orig_iocb.imm_ntfy));

	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK;
	mcmd->qpair = ha->base_qpair;

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func,
	    le16_to_cpu(iocb->u.isp2x.seq_id));
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
		    vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	int loop_id;
	unsigned long flags;

	loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	if (sess == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
		    "qla_target(%d): task abort for non-existent "
		    "session\n", vha->vp_idx);
		return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
	}

	return __qlt_abort_task(vha, iocb, sess);
}

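/*
 * Mailbox completion callback for an explicit LOGO: a failure is only
 * logged; logout_completed is set either way.
 */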
void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
{
	if (rc != MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
			"%s: se_sess %p / sess %p from"
			" port %8phC loop_id %#04x s_id %02x:%02x:%02x"
			" LOGO failed: %#x\n",
			__func__,
			fcport->se_sess,
			fcport,
			fcport->port_name, fcport->loop_id,
			fcport->d_id.b.domain, fcport->d_id.b.area,
			fcport->d_id.b.al_pa, rc);
	}

	fcport->logout_completed = 1;
}

/*
 * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
 *
 * Schedules sessions with matching port_id/loop_id but different wwn for
 * deletion. Returns the existing session with matching wwn if present,
 * NULL otherwise.
 */
struct fc_port *
qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
    port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess)
{
	struct fc_port *sess = NULL, *other_sess;
	uint64_t other_wwn;

	*conflict_sess = NULL;

	list_for_each_entry(other_sess, &vha->vp_fcports, list) {

		other_wwn = wwn_to_u64(other_sess->port_name);

		if (wwn == other_wwn) {
			WARN_ON(sess);
			sess = other_sess;
			continue;
		}

		/* find other sess with nport_id collision */
		if (port_id.b24 == other_sess->d_id.b24) {
			if (loop_id != other_sess->loop_id) {
				ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000c,
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);

				/*
				 * logout_on_delete is set by default, but another
				 * session that has the same s_id/loop_id combo
				 * might have cleared it when requested this session
				 * deletion, so don't touch it
				 */
				qlt_schedule_sess_for_deletion(other_sess);
			} else {
				/*
				 * Another wwn used to have our s_id/loop_id:
				 * kill the session, but don't free the loop_id
				 */
				ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01b,
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);

				other_sess->keep_nport_handle = 1;
				if (other_sess->disc_state != DSC_DELETED)
					*conflict_sess = other_sess;
				qlt_schedule_sess_for_deletion(other_sess);
			}
			continue;
		}

		/* find other sess with nport handle collision */
		if ((loop_id == other_sess->loop_id) &&
			(loop_id != FC_NO_LOOP_ID)) {
			ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000d,
			       "Invalidating sess %p loop_id %d wwn %llx.\n",
			       other_sess, other_sess->loop_id, other_wwn);

			/* Same loop_id but different s_id:
			 * OK to kill and logout */
			qlt_schedule_sess_for_deletion(other_sess);
		}
	}

	return sess;
}

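/*
 * The 24-bit FC source ID is packed into a lookup key as
 * domain << 16 | area << 8 | al_pa, the same layout sid_to_key()
 * produces from the s_id bytes of the queued ATIOs compared below.
 */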
/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;
	uint32_t key;
	int count = 0;
	unsigned long flags;

	key = (((u32)s_id->b.domain << 16) |
	       ((u32)s_id->b.area   <<  8) |
	       ((u32)s_id->b.al_pa));

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
		uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);

		if (op_key == key) {
			op->aborted = true;
			count++;
		}
	}

	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
		uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
		if (op_key == key) {
			op->aborted = true;
			count++;
		}
	}

	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
		if (cmd_key == key) {
			cmd->aborted = 1;
			count++;
		}
	}
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	return count;
}

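/*
 * qlt_handle_login() is the common PLOGI/PRLI path: it terminates stale
 * commands from the same s_id, invalidates sessions colliding on port id
 * or nport handle, and either links the login to an existing session or
 * schedules creation of a new one. It returns 1 when the IOCB should be
 * ACK'ed at the end of the calling thread, 0 when the ACK is asynchronous.
 */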
static int qlt_handle_login(struct scsi_qla_host *vha,
    struct imm_ntfy_from_isp *iocb)
{
	struct fc_port *sess = NULL, *conflict_sess = NULL;
	uint64_t wwn;
	port_id_t port_id;
	uint16_t loop_id, wd3_lo;
	int res = 0;
	struct qlt_plogi_ack_t *pla;
	unsigned long flags;

	wwn = wwn_to_u64(iocb->u.isp24.port_name);

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area   = iocb->u.isp24.port_id[1];
	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	/* Mark all stale commands sitting in qla_tgt_wq for deletion */
	abort_cmds_for_s_id(vha, &port_id);

	if (wwn) {
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		sess = qlt_find_sess_invalidate_other(vha, wwn,
		    port_id, loop_id, &conflict_sess);
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %d Term INOT due to WWN=0 lid=%d, NportID %06X ",
		    __func__, __LINE__, loop_id, port_id.b24);
		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	if (IS_SW_RESV_ADDR(port_id)) {
		res = 1;
		goto out;
	}

	pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
	if (!pla) {
		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
		    "%s %d %8phC Term INOT due to mem alloc fail",
		    __func__, __LINE__,
		    iocb->u.isp24.port_name);
		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	if (conflict_sess) {
		conflict_sess->login_gen++;
		qlt_plogi_ack_link(vha, pla, conflict_sess,
		    QLT_PLOGI_LINK_CONFLICT);
	}

	if (!sess) {
		pla->ref_count++;
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %d %8phC post new sess\n",
		    __func__, __LINE__, iocb->u.isp24.port_name);
		if (iocb->u.isp24.status_subcode == ELS_PLOGI)
			qla24xx_post_newsess_work(vha, &port_id,
			    iocb->u.isp24.port_name,
			    iocb->u.isp24.u.plogi.node_name,
			    pla, FC4_TYPE_UNKNOWN);
		else
			qla24xx_post_newsess_work(vha, &port_id,
			    iocb->u.isp24.port_name, NULL,
			    pla, FC4_TYPE_UNKNOWN);

		goto out;
	}

	if (sess->disc_state == DSC_UPD_FCPORT) {
		u16 sec;

		/*
		 * Remote port registration is still going on from
		 * previous login. Allow it to finish before we
		 * accept the new login.
		 */
		sess->next_disc_state = DSC_DELETE_PEND;
		sec = jiffies_to_msecs(jiffies -
		    sess->jiffies_at_registration) / 1000;
		if (sess->sec_since_registration < sec && sec &&
		    !(sec % 5)) {
			sess->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %8phC - Slow Rport registration (%d Sec)\n",
			    __func__, sess->port_name, sec);
		}

		if (!conflict_sess)
			kmem_cache_free(qla_tgt_plogi_cachep, pla);

		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
	sess->d_id = port_id;
	sess->login_gen++;

	if (iocb->u.isp24.status_subcode == ELS_PRLI) {
		sess->fw_login_state = DSC_LS_PRLI_PEND;
		sess->local = 0;
		sess->loop_id = loop_id;
		wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);

		if (wd3_lo & BIT_7)
			sess->conf_compl_supported = 1;

		if ((wd3_lo & BIT_4) == 0)
			sess->port_type = FCT_INITIATOR;
		else
			sess->port_type = FCT_TARGET;

	} else
		sess->fw_login_state = DSC_LS_PLOGI_PEND;


	ql_dbg(ql_dbg_disc, vha, 0x20f9,
	    "%s %d %8phC  DS %d\n",
	    __func__, __LINE__, sess->port_name, sess->disc_state);

	switch (sess->disc_state) {
	case DSC_DELETED:
		qlt_plogi_ack_unref(vha, pla);
		break;

	default:
		/*
		 * Under normal circumstances we want to release nport handle
		 * during LOGO process to avoid nport handle leaks inside FW.
		 * The exception is when LOGO is done while another PLOGI with
		 * the same nport handle is waiting as might be the case here.
		 * Note: there is always a possibility of a race where session
		 * deletion has already started for other reasons (e.g. ACL
		 * removal) and now PLOGI arrives:
		 * 1. if PLOGI arrived in FW after nport handle has been freed,
		 *    FW must have assigned this PLOGI a new/same handle and we
		 *    can proceed ACK'ing it as usual when session deletion
		 *    completes.
		 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
		 *    bit reached it, the handle has now been released. We'll
		 *    get an error when we ACK this PLOGI. Nothing will be sent
		 *    back to initiator. Initiator should eventually retry
		 *    PLOGI and situation will correct itself.
		 */
		sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
		    (sess->d_id.b24 == port_id.b24));

		ql_dbg(ql_dbg_disc, vha, 0x20f9,
		    "%s %d %8phC post del sess\n",
		    __func__, __LINE__, sess->port_name);


		qlt_schedule_sess_for_deletion(sess);
		break;
	}
out:
	return res;
}

4962 4963 4964 4965 4966 4967
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
4968
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4969
	struct qla_hw_data *ha = vha->hw;
4970
	struct fc_port *sess = NULL, *conflict_sess = NULL;
4971 4972 4973 4974
	uint64_t wwn;
	port_id_t port_id;
	uint16_t loop_id;
	uint16_t wd3_lo;
4975
	int res = 0;
4976
	unsigned long flags;
4977

4978 4979 4980 4981 4982 4983 4984 4985 4986
	wwn = wwn_to_u64(iocb->u.isp24.port_name);

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area   = iocb->u.isp24.port_id[1];
	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

4987 4988 4989 4990 4991 4992
	ql_dbg(ql_dbg_disc, vha, 0xf026,
	    "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n",
	    vha->vp_idx, iocb->u.isp24.port_id[2],
		iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
		   iocb->u.isp24.status_subcode, loop_id,
		iocb->u.isp24.port_name);
4993

4994 4995 4996
	/* res = 1 means ack at the end of thread
	 * res = 0 means ack async/later.
	 */
4997 4998
	switch (iocb->u.isp24.status_subcode) {
	case ELS_PLOGI:
4999 5000
		res = qlt_handle_login(vha, iocb);
		break;
5001

5002 5003 5004 5005
	case ELS_PRLI:
		if (N2N_TOPO(ha)) {
			sess = qla2x00_find_fcport_by_wwpn(vha,
			    iocb->u.isp24.port_name, 1);
5006

5007 5008 5009 5010 5011 5012 5013 5014
			if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n",
				    __func__, __LINE__,
				    iocb->u.isp24.port_name);
				qlt_send_term_imm_notif(vha, iocb, 1);
				break;
			}
5015

5016
			res = qlt_handle_login(vha, iocb);
5017 5018 5019
			break;
		}

5020 5021 5022 5023 5024
		if (IS_SW_RESV_ADDR(port_id)) {
			res = 1;
			break;
		}

5025 5026
		wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);

5027 5028
		if (wwn) {
			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
5029 5030
			sess = qlt_find_sess_invalidate_other(vha, wwn, port_id,
				loop_id, &conflict_sess);
5031 5032
			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
		}
5033 5034

		if (conflict_sess) {
5035 5036 5037 5038 5039 5040 5041 5042 5043 5044 5045 5046 5047 5048
			switch (conflict_sess->disc_state) {
			case DSC_DELETED:
			case DSC_DELETE_PEND:
				break;
			default:
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
				    "PRLI with conflicting sess %p port %8phC\n",
				    conflict_sess, conflict_sess->port_name);
				conflict_sess->fw_login_state =
				    DSC_LS_PORT_UNAVAIL;
				qlt_send_term_imm_notif(vha, iocb, 1);
				res = 0;
				break;
			}
5049
		}
5050 5051

		if (sess != NULL) {
5052
			bool delete = false;
5053
			int sec;
5054 5055
			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
			switch (sess->fw_login_state) {
5056
			case DSC_LS_PLOGI_PEND:
5057 5058 5059 5060
			case DSC_LS_PLOGI_COMP:
			case DSC_LS_PRLI_COMP:
				break;
			default:
5061 5062 5063 5064 5065
				delete = true;
				break;
			}

			switch (sess->disc_state) {
5066 5067 5068 5069 5070 5071 5072 5073 5074 5075 5076 5077 5078 5079 5080 5081
			case DSC_UPD_FCPORT:
				spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
				    flags);

				sec = jiffies_to_msecs(jiffies -
				    sess->jiffies_at_registration)/1000;
				if (sess->sec_since_registration < sec && sec &&
				    !(sec % 5)) {
					sess->sec_since_registration = sec;
					ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
					    "%s %8phC : Slow Rport registration(%d Sec)\n",
					    __func__, sess->port_name, sec);
				}
				qlt_send_term_imm_notif(vha, iocb, 1);
				return 0;

5082 5083 5084 5085 5086 5087 5088 5089 5090 5091 5092
			case DSC_LOGIN_PEND:
			case DSC_GPDB:
			case DSC_LOGIN_COMPLETE:
			case DSC_ADISC:
				delete = false;
				break;
			default:
				break;
			}

			if (delete) {
5093 5094
				spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
				    flags);
5095 5096 5097 5098 5099
				/*
				 * Impatient initiator sent PRLI before last
				 * PLOGI could finish. Will force him to re-try,
				 * while last one finishes.
				 */
5100
				ql_log(ql_log_warn, sess->vha, 0xf095,
5101 5102 5103 5104 5105 5106 5107 5108 5109 5110 5111
				    "sess %p PRLI received, before plogi ack.\n",
				    sess);
				qlt_send_term_imm_notif(vha, iocb, 1);
				res = 0;
				break;
			}

			/*
			 * This shouldn't happen under normal circumstances,
			 * since we have deleted the old session during PLOGI
			 */
5112
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
5113 5114 5115 5116 5117
			    "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
			    sess->loop_id, sess, iocb->u.isp24.nport_handle);

			sess->local = 0;
			sess->loop_id = loop_id;
5118
			sess->d_id = port_id;
5119
			sess->fw_login_state = DSC_LS_PRLI_PEND;
5120 5121 5122 5123

			if (wd3_lo & BIT_7)
				sess->conf_compl_supported = 1;

5124 5125 5126 5127
			if ((wd3_lo & BIT_4) == 0)
				sess->port_type = FCT_INITIATOR;
			else
				sess->port_type = FCT_TARGET;
5128 5129

			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
5130 5131 5132 5133 5134
		}
		res = 1; /* send notify ack */

		/* Make session global (not used in fabric mode) */
		if (ha->current_topology != ISP_CFG_F) {
5135
			if (sess) {
5136
				ql_dbg(ql_dbg_disc, vha, 0x20fa,
5137 5138 5139 5140 5141 5142 5143 5144 5145 5146
				    "%s %d %8phC post nack\n",
				    __func__, __LINE__, sess->port_name);
				qla24xx_post_nack_work(vha, sess, iocb,
					SRB_NACK_PRLI);
				res = 0;
			} else {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
5147
		} else {
5148
			if (sess) {
5149
				ql_dbg(ql_dbg_disc, vha, 0x20fb,
5150 5151
				    "%s %d %8phC post nack\n",
				    __func__, __LINE__, sess->port_name);
5152 5153 5154 5155 5156
				qla24xx_post_nack_work(vha, sess, iocb,
					SRB_NACK_PRLI);
				res = 0;
			}
		}
5157 5158
		break;

5159 5160 5161 5162 5163 5164 5165 5166
	case ELS_TPRLO:
		if (le16_to_cpu(iocb->u.isp24.flags) &
			NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
			loop_id = 0xFFFF;
			qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS);
			res = 1;
			break;
		}
5167
		/* fall through */
5168 5169
	case ELS_LOGO:
	case ELS_PRLO:
5170 5171 5172 5173 5174 5175 5176 5177 5178 5179 5180
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = qla2x00_find_fcport_by_loopid(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		if (sess) {
			sess->login_gen++;
			sess->fw_login_state = DSC_LS_LOGO_PEND;
			sess->logo_ack_needed = 1;
			memcpy(sess->iocb, iocb, IOCB_SIZE);
		}

		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);

		ql_dbg(ql_dbg_disc, vha, 0x20fc,
		    "%s: logo %llx res %d sess %p ",
		    __func__, wwn, res, sess);
		if (res == 0) {
			/*
			 * cmd went to upper layer, look for qlt_xmit_tm_rsp()
			 * for LOGO_ACK & sess delete
			 */
			BUG_ON(!sess);
			res = 0;
		} else {
			/* cmd did not go to upper layer. */
			if (sess) {
				qlt_schedule_sess_for_deletion(sess);
				res = 0;
			}
			/* else logo will be acked */
		}
		break;
	case ELS_PDISC:
	case ELS_ADISC:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(ha->base_qpair,
			    &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}

		sess = qla2x00_find_fcport_by_wwpn(vha,
		    iocb->u.isp24.port_name, 1);
		if (sess) {
			ql_dbg(ql_dbg_disc, vha, 0x20fd,
				"sess %p lid %d|%d DS %d LS %d\n",
				sess, sess->loop_id, loop_id,
				sess->disc_state, sess->fw_login_state);
		}

		res = 1; /* send notify ack */
		break;
	}

	case ELS_FLOGI:	/* should never happen */
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
		    "qla_target(%d): Unsupported ELS command %x "
		    "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
		break;
	}

	ql_dbg(ql_dbg_disc, vha, 0xf026,
	    "qla_target(%d): Exit ELS opcode: 0x%02x res %d\n",
	    vha->vp_idx, iocb->u.isp24.status_subcode, res);

	return res;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t add_flags = 0;
	int send_notify_ack = 1;
	uint16_t status;

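	/* Handlers clear send_notify_ack when they queue a response of their own. */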
	status = le16_to_cpu(iocb->u.isp2x.status);
	switch (status) {
	case IMM_NTFY_LIP_RESET:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
		    "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
		    vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_LIP_LINK_REINIT:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
		    "qla_target(%d): LINK REINIT (loop %#x, "
		    "subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(ha->base_qpair,
			    &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
		}
		memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
		tgt->link_reinit_iocb_pending = 1;
		/*
		 * QLogic requires a wait after LINK REINIT for possible
		 * PDISC or ADISC ELS commands
		 */
		send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_PORT_LOGOUT:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
		    "qla_target(%d): Port logout (loop "
		    "%#x, subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_TPRLO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
		    "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_PORT_CONFIG:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
		    "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
		    status);
		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_LOGO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
		    "qla_target(%d): Link failure detected\n",
		    vha->vp_idx);
		/* I_T nexus loss */
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_IOCB_OVERFLOW:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
		    "qla_target(%d): Cannot provide requested "
		    "capability (IOCB overflowed the immediate notify "
		    "resource count)\n", vha->vp_idx);
		break;

	case IMM_NTFY_ABORT_TASK:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
		    "qla_target(%d): Abort Task (S %08x I %#x -> "
		    "L %#x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp2x.seq_id),
		    GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
		    le16_to_cpu(iocb->u.isp2x.lun));
		if (qlt_abort_task(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_RESOURCE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
		    "qla_target(%d): Out of resources, host %ld\n",
		    vha->vp_idx, vha->host_no);
		break;

	case IMM_NTFY_MSG_RX:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
		    "qla_target(%d): Immediate notify task %x\n",
		    vha->vp_idx, iocb->u.isp2x.task_flags);
		break;

	case IMM_NTFY_ELS:
		if (qlt_24xx_handle_els(vha, iocb) == 0)
			send_notify_ack = 0;
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
		    "qla_target(%d): Received unknown immediate "
		    "notify status %x\n", vha->vp_idx, status);
		break;
	}

	if (send_notify_ack)
		qlt_send_notify_ack(ha->base_qpair, iocb, add_flags, 0, 0, 0,
		    0, 0);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 * This function sends busy to ISP 2xxx or 24xx.
 */
static int __qlt_send_busy(struct qla_qpair *qpair,
	struct atio_from_isp *atio, uint16_t status)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct fc_port *sess = NULL;
	unsigned long flags;
	u16 temp;
	port_id_t id;

	id.b.al_pa = atio->u.isp24.fcp_hdr.s_id[2];
	id.b.area = atio->u.isp24.fcp_hdr.s_id[1];
	id.b.domain = atio->u.isp24.fcp_hdr.s_id[0];
	id.b.rsvd_1 = 0;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	if (!sess) {
		qlt_send_term_exchange(qpair, NULL, atio, 1, 0);
		return 0;
	}
	/* Sending marker isn't necessary, since we're called from ISR */

	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_io, vha, 0x3063,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	qpair->tgt_counters.num_q_full_sent++;
	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

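	/*
	 * Build a CTIO Type 7 in status mode 1: no data phase, just return
	 * the requested SCSI status (BUSY / TASK SET FULL) on this exchange.
	 */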
	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = sess->loop_id;
	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) |
		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
		CTIO7_FLAGS_DONT_RET_CTIO;
	ctio24->u.status1.flags = cpu_to_le16(temp);
	/*
	 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
	 * if the explicit confirmation is used.
	 */
	ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	ctio24->u.status1.scsi_status = cpu_to_le16(status);

	ctio24->u.status1.residual = get_datalen_for_atio(atio);

	if (ctio24->u.status1.residual != 0)
		ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	return 0;
}

/*
 * This routine is used to allocate a command for either a QFull condition
 * (i.e. reply SAM_STAT_BUSY) or to terminate an exchange that did not go
 * out previously.
 */
static void
qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	struct se_session *se_sess;
	struct qla_tgt_cmd *cmd;
	int tag, cpu;
	unsigned long flags;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_io, vha, 0x300a,
			"New command while device %p is shutting down\n", tgt);
		return;
	}

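	/*
	 * Cap the number of queued QFull commands; past MAX_QFULL_CMDS_ALLOC
	 * the ATIO is dropped and only the drop statistics are updated.
	 */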
	if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
		vha->hw->tgt.num_qfull_cmds_dropped++;
		if (vha->hw->tgt.num_qfull_cmds_dropped >
			vha->qla_stats.stat_max_qfull_cmds_dropped)
			vha->qla_stats.stat_max_qfull_cmds_dropped =
				vha->hw->tgt.num_qfull_cmds_dropped;

		ql_dbg(ql_dbg_io, vha, 0x3068,
			"qla_target(%d): %s: QFull CMD dropped[%d]\n",
			vha->vp_idx, __func__,
			vha->hw->tgt.num_qfull_cmds_dropped);

		qlt_chk_exch_leak_thresh_hold(vha);
		return;
	}

	sess = ha->tgt.tgt_ops->find_sess_by_s_id
		(vha, atio->u.isp24.fcp_hdr.s_id);
	if (!sess)
		return;

	se_sess = sess->se_sess;

	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
	if (tag < 0)
		return;

	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
	if (!cmd) {
		ql_dbg(ql_dbg_io, vha, 0x3009,
			"qla_target(%d): %s: Allocation of cmd failed\n",
			vha->vp_idx, __func__);

		vha->hw->tgt.num_qfull_cmds_dropped++;
		if (vha->hw->tgt.num_qfull_cmds_dropped >
			vha->qla_stats.stat_max_qfull_cmds_dropped)
			vha->qla_stats.stat_max_qfull_cmds_dropped =
				vha->hw->tgt.num_qfull_cmds_dropped;

		qlt_chk_exch_leak_thresh_hold(vha);
		return;
	}

	memset(cmd, 0, sizeof(struct qla_tgt_cmd));

	qlt_incr_num_pend_cmds(vha);
	INIT_LIST_HEAD(&cmd->cmd_list);
	memcpy(&cmd->atio, atio, sizeof(*atio));

	cmd->tgt = vha->vha_tgt.qla_tgt;
	cmd->vha = vha;
	cmd->reset_count = ha->base_qpair->chip_reset;
	cmd->q_full = 1;
	cmd->qpair = ha->base_qpair;
	cmd->se_cmd.map_cpu = cpu;

	if (qfull) {
		cmd->q_full = 1;
		/* NOTE: borrowing the state field to carry the status */
		cmd->state = status;
	} else
		cmd->term_exchg = 1;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);

	vha->hw->tgt.num_qfull_cmds_alloc++;
	if (vha->hw->tgt.num_qfull_cmds_alloc >
		vha->qla_stats.stat_max_qfull_cmds_alloc)
		vha->qla_stats.stat_max_qfull_cmds_alloc =
			vha->hw->tgt.num_qfull_cmds_alloc;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

int
qlt_free_qfull_cmds(struct qla_qpair *qpair)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct qla_tgt_cmd *cmd, *tcmd;
	struct list_head free_list, q_full_list;
	int rc = 0;

	if (list_empty(&ha->tgt.q_full_list))
		return 0;

	INIT_LIST_HEAD(&free_list);
	INIT_LIST_HEAD(&q_full_list);

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	if (list_empty(&ha->tgt.q_full_list)) {
		spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
		return 0;
	}

	list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list);
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) {
		if (cmd->q_full)
			/* cmd->state is a borrowed field to hold status */
			rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state);
		else if (cmd->term_exchg)
			rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio);

		if (rc == -ENOMEM)
			break;

		if (cmd->q_full)
			ql_dbg(ql_dbg_io, vha, 0x3006,
			    "%s: busy sent for ox_id[%04x]\n", __func__,
			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
		else if (cmd->term_exchg)
			ql_dbg(ql_dbg_io, vha, 0x3007,
			    "%s: Term exchg sent for ox_id[%04x]\n", __func__,
			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
		else
			ql_dbg(ql_dbg_io, vha, 0x3008,
			    "%s: Unexpected cmd in QFull list %p\n", __func__,
			    cmd);

		list_del(&cmd->cmd_list);
		list_add_tail(&cmd->cmd_list, &free_list);

		/* piggy back on hardware_lock for protection */
		vha->hw->tgt.num_qfull_cmds_alloc--;
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	cmd = NULL;

	list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
		list_del(&cmd->cmd_list);
		/* This cmd was never sent to TCM.  There is no need
		 * to schedule free or call free_cmd
		 */
		qlt_free_cmd(cmd);
	}

	if (!list_empty(&q_full_list)) {
		spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
		list_splice(&q_full_list, &vha->hw->tgt.q_full_list);
		spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
	}

	return rc;
}

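/*
 * Try to send BUSY/TASK SET FULL inline; if no request-queue IOCB is
 * available (-ENOMEM), park the ATIO on q_full_list so it can be
 * replayed later by qlt_free_qfull_cmds().
 */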
static void
qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio,
    uint16_t status)
{
	int rc = 0;
	struct scsi_qla_host *vha = qpair->vha;

	rc = __qlt_send_busy(qpair, atio, status);
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, atio, status, 1);
}

static int
qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
		return 0;

	if (!ha_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_send_busy(qpair, atio, qla_sam_status);
	if (!ha_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return 1;
}

/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int rc;
	unsigned long flags = 0;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0x3064,
		    "ATIO pkt, but no tgt (ha %p)", ha);
		return;
	}
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	tgt->atio_irq_cmd_count++;

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
		if (unlikely(atio->u.isp24.exchange_addr ==
		    ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
			ql_dbg(ql_dbg_io, vha, 0x3065,
			    "qla_target(%d): ATIO_TYPE7 "
			    "received with UNKNOWN exchange address, "
			    "sending QUEUE_FULL\n", vha->vp_idx);
			if (!ha_locked)
				spin_lock_irqsave(&ha->hardware_lock, flags);
			qlt_send_busy(ha->base_qpair, atio, qla_sam_status);
			if (!ha_locked)
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
			break;
		}

		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
			rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair,
			    atio, ha_locked);
			if (rc != 0) {
				tgt->atio_irq_cmd_count--;
				return;
			}
			rc = qlt_handle_cmd_for_atio(vha, atio);
		} else {
			rc = qlt_handle_task_mgmt(vha, atio);
		}
		if (unlikely(rc != 0)) {
			if (!ha_locked)
				spin_lock_irqsave(&ha->hardware_lock, flags);
			switch (rc) {
			case -ENODEV:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target\n");
				break;
			case -EBADF:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
				qlt_send_term_exchange(ha->base_qpair, NULL,
				    atio, 1, 0);
				break;
			case -EBUSY:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(ha->base_qpair, atio,
				    tc_sam_status);
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(ha->base_qpair, atio,
				    qla_sam_status);
				break;
			}
			if (!ha_locked)
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
		}
		break;

	case IMMED_NOTIFY_TYPE:
	{
		if (unlikely(atio->u.isp2x.entry_status != 0)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05b,
			    "qla_target(%d): Received ATIO packet %x "
			    "with error status %x\n", vha->vp_idx,
			    atio->u.raw.entry_type,
			    atio->u.isp2x.entry_status);
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");

		if (!ha_locked)
			spin_lock_irqsave(&ha->hardware_lock, flags);
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe05c,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	tgt->atio_irq_cmd_count--;
}

/*
 * qpair lock is assumed to be held
 * rc = 0 : send terminate & ABTS response
 * rc != 0: do not send term & ABTS response
 */
static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
    struct qla_qpair *qpair, struct abts_resp_from_24xx_fw *entry)
{
	struct qla_hw_data *ha = vha->hw;
	int rc = 0;

	/*
	 * Detect unresolved exchange. If the same ABTS is unable
	 * to terminate an existing command and the same ABTS loops
	 * between FW & Driver, then force FW dump. Under 1 jiff,
	 * we should see multiple loops.
	 */
	if (qpair->retry_term_exchg_addr == entry->exchange_addr_to_abort &&
	    qpair->retry_term_jiff == jiffies) {
		/* found existing exchange */
		qpair->retry_term_cnt++;
		if (qpair->retry_term_cnt >= 5) {
			rc = EIO;
			qpair->retry_term_cnt = 0;
			ql_log(ql_log_warn, vha, 0xffff,
			    "Unable to send ABTS Respond. Dumping firmware.\n");
			ql_dump_buffer(ql_dbg_tgt_mgt + ql_dbg_buffer,
			    vha, 0xffff, (uint8_t *)entry, sizeof(*entry));

			if (qpair == ha->base_qpair)
				ha->isp_ops->fw_dump(vha, 1);
			else
				ha->isp_ops->fw_dump(vha, 0);

			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
	} else if (qpair->retry_term_jiff != jiffies) {
		qpair->retry_term_exchg_addr = entry->exchange_addr_to_abort;
		qpair->retry_term_cnt = 0;
		qpair->retry_term_jiff = jiffies;
	}

	return rc;
}


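/*
 * Completion path for a queued ABTS response. On success the mgmt cmd is
 * simply freed; on an unresolved-exchange error (subcode 0x1E) the
 * termination is retried, with qlt_chk_unresolv_exchg() forcing a fw dump
 * if the same exchange keeps looping between firmware and driver.
 */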
static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	struct abts_resp_from_24xx_fw *entry =
		(struct abts_resp_from_24xx_fw *)pkt;
	u32 h = pkt->handle & ~QLA_TGT_HANDLE_MASK;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct qla_hw_data *ha = vha->hw;

	mcmd = (struct qla_tgt_mgmt_cmd *)qlt_ctio_to_cmd(vha, rsp,
	    pkt->handle, pkt);
	if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) {
		ql_dbg(ql_dbg_async, vha, 0xe064,
		    "qla_target(%d): ABTS Comp without mcmd\n",
		    vha->vp_idx);
		return;
	}

	if (mcmd)
		vha  = mcmd->vha;
	vha->vha_tgt.qla_tgt->abts_resp_expected--;

	ql_dbg(ql_dbg_tgt, vha, 0xe038,
	    "ABTS_RESP_24XX: compl_status %x\n",
	    entry->compl_status);

	if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) {
		if ((entry->error_subcode1 == 0x1E) &&
		    (entry->error_subcode2 == 0)) {
			if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
				ha->tgt.tgt_ops->free_mcmd(mcmd);
				return;
			}
			qlt_24xx_retry_term_exchange(vha, rsp->qpair,
			    pkt, mcmd);
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe063,
			    "qla_target(%d): ABTS_RESP_24XX failed %x (subcode %x:%x)",
			    vha->vp_idx, entry->compl_status,
			    entry->error_subcode1,
			    entry->error_subcode2);
			ha->tgt.tgt_ops->free_mcmd(mcmd);
		}
	} else {
		ha->tgt.tgt_ops->free_mcmd(mcmd);
	}
}

/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_response_pkt(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
		    "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n",
		    vha->vp_idx, pkt->entry_type, vha->hw);
		return;
	}

	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	switch (pkt->entry_type) {
	case CTIO_CRC2:
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;

		qlt_do_ctio_completion(vha, rsp, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case ACCEPT_TGT_IO_TYPE:
	{
		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
		int rc;
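
		/* Legacy (isp2x) ATIOs arrive on the response ring. */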
		if (atio->u.isp2x.status !=
		    cpu_to_le16(ATIO_CDB_VALID)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
			    "qla_target(%d): ATIO with error "
			    "status %x received\n", vha->vp_idx,
			    le16_to_cpu(atio->u.isp2x.status));
			break;
		}

		rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1);
		if (rc != 0)
			return;

		rc = qlt_handle_cmd_for_atio(vha, atio);
		if (unlikely(rc != 0)) {
			switch (rc) {
			case -ENODEV:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target\n");
				break;
			case -EBADF:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
				qlt_send_term_exchange(rsp->qpair, NULL,
				    atio, 1, 0);
				break;
			case -EBUSY:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(rsp->qpair, atio,
				    tc_sam_status);
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(rsp->qpair, atio,
				    qla_sam_status);
				break;
			}
		}
	}
	break;

	case CONTINUE_TGT_IO_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		qlt_do_ctio_completion(vha, rsp, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case CTIO_A64_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		qlt_do_ctio_completion(vha, rsp, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case IMMED_NOTIFY_TYPE:
		ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
		break;

	case NOTIFY_ACK_TYPE:
		if (tgt->notify_ack_expected > 0) {
			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe036,
			    "NOTIFY_ACK seq %08x status %x\n",
			    le16_to_cpu(entry->u.isp2x.seq_id),
			    le16_to_cpu(entry->u.isp2x.status));
			tgt->notify_ack_expected--;
			if (entry->u.isp2x.status !=
			    cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe061,
				    "qla_target(%d): NOTIFY_ACK "
				    "failed %x\n", vha->vp_idx,
				    le16_to_cpu(entry->u.isp2x.status));
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe062,
			    "qla_target(%d): Unexpected NOTIFY_ACK received\n",
			    vha->vp_idx);
		}
		break;

	case ABTS_RECV_24XX:
		ql_dbg(ql_dbg_tgt, vha, 0xe037,
		    "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
		break;

	case ABTS_RESP_24XX:
		if (tgt->abts_resp_expected > 0) {
			qlt_handle_abts_completion(vha, rsp, pkt);
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe064,
			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
			    "received\n", vha->vp_idx);
		}
		break;

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe065,
		    "qla_target(%d): Received unknown response pkt "
		    "type %x\n", vha->vp_idx, pkt->entry_type);
		break;
	}

}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
	uint16_t *mailbox)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int login_code;

	if (!tgt || tgt->tgt_stop || tgt->tgt_stopped)
		return;

	if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
	    IS_QLA2100(ha))
		return;
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */


	switch (code) {
	case MBA_RESET:			/* Reset */
	case MBA_SYSTEM_ERR:		/* System Error */
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
		    "qla_target(%d): System error async event %#x "
		    "occurred", vha->vp_idx, code);
		break;
	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up. */
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LOOP_UP:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
		    "qla_target(%d): Async LOOP_UP occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(ha->base_qpair,
			    (void *)&tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		break;
	}

	case MBA_LIP_OCCURRED:
	case MBA_LOOP_DOWN:
	case MBA_LIP_RESET:
	case MBA_RSCN_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
		    "qla_target(%d): Async event %#x occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		break;

	case MBA_REJECTED_FCP_CMD:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017,
		    "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)",
		    vha->vp_idx,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));

		if (le16_to_cpu(mailbox[3]) == 1) {
			/* exchange starvation. */
			vha->hw->exch_starvation++;
			if (vha->hw->exch_starvation > 5) {
				ql_log(ql_log_warn, vha, 0xd03a,
				    "Exchange starvation. Resetting RISC\n");

				vha->hw->exch_starvation = 0;
				if (IS_P3P_TYPE(vha->hw))
					set_bit(FCOE_CTX_RESET_NEEDED,
					    &vha->dpc_flags);
				else
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		}
		break;

	case MBA_PORT_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
		    "qla_target(%d): Port update async event %#x "
		    "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
		    "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));

		login_code = le16_to_cpu(mailbox[2]);
		if (login_code == 0x4) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
			    "Async MB 2: Got PLOGI Complete\n");
			vha->hw->exch_starvation = 0;
		} else if (login_code == 0x7)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
			    "Async MB 2: Port Logged Out\n");
		break;
	default:
		break;
	}

}

static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
	uint16_t loop_id)
{
	fc_port_t *fcport, *tfcp, *del;
	int rc;
	unsigned long flags;
	u8 newfcport = 0;

	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
		    "qla_target(%d): Allocation of tmp FC port failed",
		    vha->vp_idx);
		return NULL;
	}

	fcport->loop_id = loop_id;

	rc = qla24xx_gpdb_wait(vha, fcport, 0);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
		    "qla_target(%d): Failed to retrieve fcport "
		    "information -- get_port_database() returned %x "
		    "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
		kfree(fcport);
		return NULL;
	}

	del = NULL;
	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1);

	if (tfcp) {
		tfcp->d_id = fcport->d_id;
		tfcp->port_type = fcport->port_type;
		tfcp->supported_classes = fcport->supported_classes;
		tfcp->flags |= fcport->flags;
		tfcp->scan_state = QLA_FCPORT_FOUND;

		del = fcport;
		fcport = tfcp;
	} else {
		if (vha->hw->current_topology == ISP_CFG_F)
			fcport->flags |= FCF_FABRIC_DEVICE;

		list_add_tail(&fcport->list, &vha->vp_fcports);
		if (!IS_SW_RESV_ADDR(fcport->d_id))
			vha->fcport_count++;
		fcport->login_gen++;
		fcport->disc_state = DSC_LOGIN_COMPLETE;
		fcport->login_succ = 1;
		newfcport = 1;
	}

	fcport->deleted = 0;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	switch (vha->host->active_mode) {
	case MODE_INITIATOR:
	case MODE_DUAL:
		if (newfcport) {
			if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
				qla24xx_sched_upd_fcport(fcport);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x20ff,
				   "%s %d %8phC post gpsc fcp_cnt %d\n",
				   __func__, __LINE__, fcport->port_name, vha->fcport_count);
				qla24xx_post_gpsc_work(vha, fcport);
			}
		}
		break;

	case MODE_TARGET:
	default:
		break;
	}
	if (del)
		qla2x00_free_fcport(del);

	return fcport;
}

/* Acquires and releases tgt_mutex internally; do not call with it held */
static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha,
	uint8_t *s_id)
{
	struct fc_port *sess = NULL;
	fc_port_t *fcport = NULL;
	int rc, global_resets;
	uint16_t loop_id = 0;

	if ((s_id[0] == 0xFF) && (s_id[1] == 0xFC)) {
		/*
		 * This is Domain Controller, so it should be
		 * OK to drop SCSI commands from it.
		 */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
		    "Unable to find initiator with S_ID %x:%x:%x",
		    s_id[0], s_id[1], s_id[2]);
		return NULL;
	}

	mutex_lock(&vha->vha_tgt.tgt_mutex);

retry:
	global_resets =
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);

	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
	if (rc != 0) {
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		ql_log(ql_log_info, vha, 0xf071,
		    "qla_target(%d): Unable to find "
		    "initiator with S_ID %x:%x:%x",
		    vha->vp_idx, s_id[0], s_id[1],
		    s_id[2]);

		if (rc == -ENOENT) {
			qlt_port_logo_t logo;
			sid_to_portid(s_id, &logo.id);
			logo.cmd_count = 1;
			qlt_send_first_logo(vha, &logo);
		}

		return NULL;
	}

	fcport = qlt_get_port_database(vha, loop_id);
	if (!fcport) {
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		return NULL;
	}

	if (global_resets !=
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
		    "qla_target(%d): global reset during session discovery "
		    "(counter was %d, new %d), retrying", vha->vp_idx,
		    global_resets,
		    atomic_read(&vha->vha_tgt.
			qla_tgt->tgt_global_resets_count));
		goto retry;
	}

	sess = qlt_create_sess(vha, fcport, true);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	return sess;
}

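/*
 * Deferred ABTS handling: runs from the sess_work queue so that a missing
 * session can be created via qlt_make_local_sess() outside interrupt
 * context before the abort is passed to __qlt_24xx_handle_abts().
 */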
static void qlt_abort_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	unsigned long flags = 0, flags2 = 0;
	uint32_t be_s_id;
	uint8_t s_id[3];
	int rc;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags2);

	if (tgt->tgt_stop)
		goto out_term2;

	s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
	s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
	s_id[2] = prm->abts.fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    (unsigned char *)&be_s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */

		spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
		if (!sess)
			goto out_term2;
	} else {
		if (sess->deleted) {
			sess = NULL;
			goto out_term2;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c,
			    "%s: kref_get fail %8phC \n",
			     __func__, sess->port_name);
			sess = NULL;
			goto out_term2;
		}
	}

	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

	if (rc != 0)
		goto out_term;
	return;

out_term2:
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

out_term:
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts,
	    FCP_TMF_REJECTED, false);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void qlt_tmr_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct atio_from_isp *a = &prm->tm_iocb2;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	unsigned long flags;
	uint8_t *s_id = NULL; /* to hide compiler warnings */
	int rc;
	u64 unpacked_lun;
	int fn;
	void *iocb;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);

	if (tgt->tgt_stop)
		goto out_term2;

	s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (!sess)
			goto out_term2;
	} else {
		if (sess->deleted) {
			sess = NULL;
			goto out_term2;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020,
			    "%s: kref_get fail %8phC\n",
			     __func__, sess->port_name);
			sess = NULL;
			goto out_term2;
		}
	}

	iocb = a;
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);

	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	if (rc != 0)
		goto out_term;
	return;

out_term2:
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
out_term:
	qlt_send_term_exchange(ha->base_qpair, NULL, &prm->tm_iocb2, 1, 0);
}

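/*
 * Drain the deferred session work list; each entry is an ABTS or a task
 * management request that was queued from interrupt context.
 */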
static void qlt_sess_work_fn(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
	struct scsi_qla_host *vha = tgt->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		struct qla_tgt_sess_work_param *prm = list_entry(
		    tgt->sess_works_list.next, typeof(*prm),
		    sess_works_list_entry);

		/*
		 * This work can be scheduled on several CPUs at time, so we
		 * must delete the entry to eliminate double processing
		 */
		list_del(&prm->sess_works_list_entry);

		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

		switch (prm->type) {
		case QLA_TGT_SESS_WORK_ABORT:
			qlt_abort_work(tgt, prm);
			break;
		case QLA_TGT_SESS_WORK_TM:
			qlt_tmr_work(tgt, prm);
			break;
		default:
			BUG_ON(1);
			break;
		}

		spin_lock_irqsave(&tgt->sess_work_lock, flags);

		kfree(prm);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
}

/* Must be called under tgt_host_action_mutex */
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
	struct qla_tgt *tgt;
	int rc, i;
	struct qla_qpair_hint *h;

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	if (!IS_TGT_MODE_CAPABLE(ha)) {
		ql_log(ql_log_warn, base_vha, 0xe070,
		    "This adapter does not support target mode.\n");
		return 0;
	}

	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
	    "Registering target for host %ld(%p).\n", base_vha->host_no, ha);

	BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);

	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
	if (!tgt) {
		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
		    "Unable to allocate struct qla_tgt\n");
		return -ENOMEM;
	}

	tgt->qphints = kcalloc(ha->max_qpairs + 1,
			       sizeof(struct qla_qpair_hint),
			       GFP_KERNEL);
	if (!tgt->qphints) {
		kfree(tgt);
		ql_log(ql_log_warn, base_vha, 0x0197,
		    "Unable to allocate qpair hints.\n");
		return -ENOMEM;
	}

	if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
		base_vha->host->hostt->supported_mode |= MODE_TARGET;

	rc = btree_init64(&tgt->lun_qpair_map);
	if (rc) {
		kfree(tgt->qphints);
		kfree(tgt);
		ql_log(ql_log_info, base_vha, 0x0198,
			"Unable to initialize lun_qpair_map btree\n");
		return -EIO;
	}
	h = &tgt->qphints[0];
	h->qpair = ha->base_qpair;
	INIT_LIST_HEAD(&h->hint_elem);
	h->cpuid = ha->base_qpair->cpuid;
	list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list);

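	/*
	 * Hint slot 0 is the base qpair; slots 1..max_qpairs mirror
	 * queue_pair_map so LUNs can later be spread across qpairs through
	 * the lun_qpair_map btree.
	 */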
	for (i = 0; i < ha->max_qpairs; i++) {
		unsigned long flags;

		struct qla_qpair *qpair = ha->queue_pair_map[i];
		h = &tgt->qphints[i + 1];
		INIT_LIST_HEAD(&h->hint_elem);
		if (qpair) {
			h->qpair = qpair;
			spin_lock_irqsave(qpair->qp_lock_ptr, flags);
			list_add_tail(&h->hint_elem, &qpair->hints_list);
			spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
			h->cpuid = qpair->cpuid;
		}
	}

	tgt->ha = ha;
	tgt->vha = base_vha;
	init_waitqueue_head(&tgt->waitQ);
	INIT_LIST_HEAD(&tgt->del_sess_list);
	spin_lock_init(&tgt->sess_work_lock);
	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
	INIT_LIST_HEAD(&tgt->sess_works_list);
	atomic_set(&tgt->tgt_global_resets_count, 0);

	base_vha->vha_tgt.qla_tgt = tgt;

	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
		"qla_target(%d): using 64 Bit PCI addressing",
		base_vha->vp_idx);
	/* 3 is reserved */
	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);

	mutex_lock(&qla_tgt_mutex);
	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
	mutex_unlock(&qla_tgt_mutex);

	if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
		ha->tgt.tgt_ops->add_target(base_vha);

	return 0;
}

/* Must be called under tgt_host_action_mutex */
int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
{
	if (!vha->vha_tgt.qla_tgt)
		return 0;

	if (vha->fc_vport) {
		qlt_release(vha->vha_tgt.qla_tgt);
		return 0;
	}

	/* free left over qfull cmds */
	qlt_init_term_exchange(vha);

	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
	    vha->host_no, ha);
	qlt_release(vha->vha_tgt.qla_tgt);

	return 0;
}

void qlt_remove_target_resources(struct qla_hw_data *ha)
{
	struct scsi_qla_host *node;
	u32 key = 0;

	btree_for_each_safe32(&ha->tgt.host_map, key, node)
		btree_remove32(&ha->tgt.host_map, key);

	btree_destroy32(&ha->tgt.host_map);
}

static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
	unsigned char *b)
{
	int i;

	pr_debug("qla2xxx HW vha->node_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->node_name[i]);
	pr_debug("\n");
	pr_debug("qla2xxx HW vha->port_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->port_name[i]);
	pr_debug("\n");

	pr_debug("qla2xxx passed configfs WWPN: ");
	put_unaligned_be64(wwpn, b);
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", b[i]);
	pr_debug("\n");
}

/**
 * qla_tgt_lport_register - register lport with external module
 *
 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
 * @phys_wwpn: physical port WWPN of the underlying qla2xxx host
 * @npiv_wwpn: NPIV WWPN, or 0 when registering a physical port
 * @npiv_wwnn: NPIV WWNN, or 0 when registering a physical port
 * @callback:  lport initialization callback for tcm_qla2xxx code
 */
int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
		       u64 npiv_wwpn, u64 npiv_wwnn,
		       int (*callback)(struct scsi_qla_host *, void *, u64, u64))
{
	struct qla_tgt *tgt;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct Scsi_Host *host;
	unsigned long flags;
	int rc;
	u8 b[WWN_SIZE];

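	/*
	 * Walk every registered qla_tgt and bind to the first host whose
	 * physical port name matches phys_wwpn; the callback hands the
	 * lport over to the fabric module while qla_tgt_mutex is held.
	 */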
	mutex_lock(&qla_tgt_mutex);
	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
		vha = tgt->vha;
		ha = vha->hw;

		host = vha->host;
		if (!host)
			continue;

		if (!(host->hostt->supported_mode & MODE_TARGET))
			continue;

		if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
			continue;

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		if (tgt->tgt_stop) {
			pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
				 host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		if (!scsi_host_get(host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe068,
			    "Unable to scsi_host_get() for"
			    " qla2xxx scsi_host\n");
			continue;
		}
		qlt_lport_dump(vha, phys_wwpn, b);

		if (memcmp(vha->port_name, b, WWN_SIZE)) {
			scsi_host_put(host);
			continue;
		}
		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
		if (rc != 0)
			scsi_host_put(host);

		mutex_unlock(&qla_tgt_mutex);
		return rc;
	}
	mutex_unlock(&qla_tgt_mutex);

	return -ENODEV;
}
EXPORT_SYMBOL(qlt_lport_register);

/**
 * qla_tgt_lport_deregister - Deregister lport
 *
 * @vha:  Registered scsi_qla_host pointer
 */
void qlt_lport_deregister(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct Scsi_Host *sh = vha->host;
	/*
	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
	 */
	vha->vha_tgt.target_lport_ptr = NULL;
	ha->tgt.tgt_ops = NULL;
	/*
	 * Release the Scsi_Host reference for the underlying qla2xxx host
	 */
	scsi_host_put(sh);
}
EXPORT_SYMBOL(qlt_lport_deregister);

/* Must be called under HW lock */
void qlt_set_mode(struct scsi_qla_host *vha)
{
	switch (vha->qlini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_TARGET;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_DUAL:
		vha->host->active_mode = MODE_DUAL;
		break;
	default:
		break;
	}
}

/* Must be called under HW lock */
static void qlt_clear_mode(struct scsi_qla_host *vha)
{
	switch (vha->qlini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
		vha->host->active_mode = MODE_UNKNOWN;
		break;
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
	case QLA2XXX_INI_MODE_DUAL:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	default:
		break;
	}
}

/*
 * qla_tgt_enable_vha - NO LOCK HELD
 *
 * host_reset, bring up w/ Target Mode Enabled
 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe069,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}
	if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
		return;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stopped = 0;
	qlt_set_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	mutex_lock(&ha->optrom_mutex);
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
	    "%s.\n", __func__);
	if (vha->vp_idx) {
		qla24xx_disable_vp(vha);
		qla24xx_enable_vp(vha);
	} else {
		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
		qla2xxx_wake_dpc(base_vha);
		qla2x00_wait_for_hba_online(base_vha);
	}
	mutex_unlock(&ha->optrom_mutex);
}
EXPORT_SYMBOL(qlt_enable_vha);

/*
 * qla_tgt_disable_vha - NO LOCK HELD
 *
 * Disable Target Mode and reset the adapter
 */
static void qlt_disable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_clear_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_hba_online(vha);
}

/*
 * Called from qla_init.c:qla24xx_vport_create() context to setup
 * the target mode specific struct scsi_qla_host and struct qla_hw_data
 * members.
 */
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
	vha->vha_tgt.qla_tgt = NULL;

	mutex_init(&vha->vha_tgt.tgt_mutex);
	mutex_init(&vha->vha_tgt.tgt_host_action_mutex);

	qlt_clear_mode(vha);

	/*
	 * NOTE: Currently the value is kept the same for <24xx and
	 * >=24xx ISPs. If it is necessary to change it,
	 * the check should be added for specific ISPs,
	 * assigning the value appropriately.
	 */
	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;

	qlt_add_target(ha, vha);
}

u8
qlt_rff_id(struct scsi_qla_host *vha)
{
	u8 fc4_feature = 0;
	/*
	 * FC-4 Feature bit 0 indicates target functionality to the name server.
	 */
	if (qla_tgt_mode_enabled(vha)) {
		fc4_feature = BIT_0;
	} else if (qla_ini_mode_enabled(vha)) {
		fc4_feature = BIT_1;
	} else if (qla_dual_mode_enabled(vha))
		fc4_feature = BIT_0 | BIT_1;

	return fc4_feature;
}

/*
 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
 * @ha: HA context
 *
 * Beginning of ATIO ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
 */
void
qlt_init_atio_q_entries(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t cnt;
	struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;

	if (qla_ini_mode_enabled(vha))
		return;

	for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
		pkt->u.raw.signature = ATIO_PROCESSED;
		pkt++;
	}

}

/*
 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
 * @ha: SCSI driver HA context
 */
void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *pkt;
	int cnt, i;

	if (!ha->flags.fw_started)
		return;

	while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
	    fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		cnt = pkt->u.raw.entry_count;

		if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
			/*
			 * This packet is corrupted. The header + payload
			 * can not be trusted. There is no point in passing
			 * it further up.
			 */
			ql_log(ql_log_warn, vha, 0xd03c,
			    "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
			    pkt->u.isp24.fcp_hdr.s_id,
			    be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
			    le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);

			adjust_corrupted_atio(pkt);
			qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
			    ha_locked, 0);
		} else {
			qlt_24xx_atio_pkt_all_vps(vha,
			    (struct atio_from_isp *)pkt, ha_locked);
		}

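		/*
		 * Consume every ring slot the packet occupied (entry_count)
		 * and mark each one processed before updating the out index.
		 */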
		for (i = 0; i < cnt; i++) {
			ha->tgt.atio_ring_index++;
			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
				ha->tgt.atio_ring_index = 0;
				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
			} else
				ha->tgt.atio_ring_ptr++;

			pkt->u.raw.signature = ATIO_PROCESSED;
			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		}
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}

void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_msix_entry *msix = &ha->msix_entries[2];
	struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
	RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));

	if (ha->flags.msix_enabled) {
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
			if (IS_QLA2071(ha)) {
				/* 4 ports Baker: Enable Interrupt Handshake */
				icb->msix_atio = 0;
				icb->firmware_options_2 |= BIT_26;
			} else {
				icb->msix_atio = cpu_to_le16(msix->entry);
				icb->firmware_options_2 &= ~BIT_26;
			}
			ql_dbg(ql_dbg_init, vha, 0xf072,
			    "Registering ICB vector 0x%x for atio que.\n",
			    msix->entry);
		}
	} else {
		/* INTx|MSI */
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
			icb->msix_atio = 0;
			icb->firmware_options_2 |= BIT_26;
			ql_dbg(ql_dbg_init, vha, 0xf072,
			    "%s: Use INTx for ATIOQ.\n", __func__);
		}
	}
}

void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
	struct qla_hw_data *ha = vha->hw;
	u32 tmp;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
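		/*
		 * The original NVRAM options are saved once so that the
		 * initiator-only values can be restored if target mode is
		 * disabled again (see the else branch below).
		 */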
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		if (qla_tgt_mode_enabled(vha))
			nv->exchange_count = cpu_to_le16(0xFFFF);
		else			/* dual */
			nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (qla_tgt_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		if (ql2xtgt_tape_enable)
			/* Enable FC Tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC Tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);

		/*
		 * clear BIT 15 explicitly as we have seen at least
		 * a couple of instances where this was set and this
		 * was causing the firmware to not be initialized.
		 */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);

		if (IS_QLA25XX(ha)) {
			/* Change Loop-prefer to Pt-Pt */
			tmp = ~(BIT_4|BIT_5|BIT_6);
			nv->firmware_options_2 &= cpu_to_le32(tmp);
			tmp = P2P << 4;
			nv->firmware_options_2 |= cpu_to_le32(tmp);
		}
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	if (ha->base_qpair->enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}

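/*
 * Second-stage fixup: if a target node name override was configured,
 * copy it into the init control block and set firmware_options_1
 * BIT_14 (use node name from ICB).
 */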
void
qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_24xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
	}
}

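/*
 * ISP81xx counterpart of qlt_24xx_config_nvram_stage1().  The logic
 * mirrors the 24xx version, except that the Loop-prefer to
 * point-to-point topology change is applied unconditionally.
 */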
void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
	struct qla_hw_data *ha = vha->hw;
	u32 tmp;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		if (qla_tgt_mode_enabled(vha))
			nv->exchange_count = cpu_to_le16(0xFFFF);
		else			/* dual */
			nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (qla_tgt_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);
		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		/*
		 * clear BIT 15 explicitly as we have seen at
		 * least a couple of instances where this was set
		 * and this was causing the firmware to not be
		 * initialized.
		 */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
		if (ql2xtgt_tape_enable)
			/* Enable FC tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);

		/* Change Loop-prefer to Pt-Pt */
		tmp = ~(BIT_4|BIT_5|BIT_6);
		nv->firmware_options_2 &= cpu_to_le32(tmp);
		tmp = P2P << 4;
		nv->firmware_options_2 |= cpu_to_le32(tmp);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	if (ha->base_qpair->enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}

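/*
 * ISP81xx counterpart of qlt_24xx_config_nvram_stage2(): apply the
 * target node name override, if one was configured.
 */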
void
qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_81xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
	}
}

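/*
 * Account for the extra MSI-X vector needed by the dedicated ATIO
 * queue when sizing the 83xx I/O space.
 */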
void
qlt_83xx_iospace_config(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	ha->msix_count += 1; /* For ATIO Q */
}

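/*
 * Tune a virtual port's VP config entry according to the current
 * operating mode by adjusting the target/initiator mode bits in
 * options_idx1.
 */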
void
qlt_modify_vp_config(struct scsi_qla_host *vha,
	struct vp_config_entry_24xx *vpmod)
{
	/* enable target mode.  Bit5 = 1 => disable */
	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_5;

	/* Disable ini mode, if requested.  bit4 = 1 => disable */
	if (qla_tgt_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_4;
}

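/*
 * Early target-mode setup for a new adapter: select the ATIO queue
 * register pair for this chip family, initialize the target-mode
 * mutexes and the unknown-ATIO work, clear target mode, and set up
 * the host_map btree plus the vp_idx mapping for the base port.
 */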
void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
	int rc;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
	} else {
		ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
	}

	mutex_init(&base_vha->vha_tgt.tgt_mutex);
	mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);

	INIT_LIST_HEAD(&base_vha->unknown_atio_list);
	INIT_DELAYED_WORK(&base_vha->unknown_atio_work,
	    qlt_unknown_atio_work_fn);

	qlt_clear_mode(base_vha);

	rc = btree_init32(&ha->tgt.host_map);
	if (rc)
		ql_log(ql_log_info, base_vha, 0xd03d,
		    "Unable to initialize ha->host_map btree\n");

	qlt_update_vp_map(base_vha, SET_VP_IDX);
}

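/*
 * MSI-X handler for the dedicated ATIO queue interrupt: drain the
 * ATIO ring under the atio_lock.
 */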
irqreturn_t
qla83xx_msix_atio_q(int irq, void *dev_id)
{
	struct rsp_que *rsp;
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	ha = rsp->hw;
	vha = pci_get_drvdata(ha->pdev);

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);

	qlt_24xx_process_atio_queue(vha, 0);

	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);

	return IRQ_HANDLED;
}

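/*
 * Work-context half of ABTS receive handling: unless a chip reset has
 * intervened, drain the ATIO queue first and then replay the saved
 * ABTS packet through the normal response-packet path.
 */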
static void
qlt_handle_abts_recv_work(struct work_struct *work)
{
	struct qla_tgt_sess_op *op = container_of(work,
		struct qla_tgt_sess_op, work);
	scsi_qla_host_t *vha = op->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	if (qla2x00_reset_active(vha) ||
	    (op->chip_reset != ha->base_qpair->chip_reset))
		return;

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);
	qlt_24xx_process_atio_queue(vha, 0);
	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	kfree(op);
}

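/*
 * Defer an ABTS received on a response queue to work context.  If the
 * work item cannot be allocated, fall back to handing the packet
 * straight to qlt_response_pkt_all_vps().
 */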
void
qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp,
    response_t *pkt)
{
	struct qla_tgt_sess_op *op;

	op = kzalloc(sizeof(*op), GFP_ATOMIC);

	if (!op) {
		/* Do not reach for the ATIO queue here; this is best-effort
		 * error recovery at this point.
		 */
		qlt_response_pkt_all_vps(vha, rsp, pkt);
		return;
	}

	memcpy(&op->atio, pkt, sizeof(*pkt));
	op->vha = vha;
	op->chip_reset = vha->hw->base_qpair->chip_reset;
	op->rsp = rsp;
	INIT_WORK(&op->work, qlt_handle_abts_recv_work);
	queue_work(qla_tgt_wq, &op->work);
	return;
}

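/*
 * Allocate the per-HW target-mode resources: the vp_idx -> vha lookup
 * table and the DMA-coherent ATIO descriptor ring.
 */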
int
qlt_mem_alloc(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	ha->tgt.tgt_vp_map = kcalloc(MAX_MULTI_ID_FABRIC,
				     sizeof(struct qla_tgt_vp_map),
				     GFP_KERNEL);
	if (!ha->tgt.tgt_vp_map)
		return -ENOMEM;

	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
	    &ha->tgt.atio_dma, GFP_KERNEL);
	if (!ha->tgt.atio_ring) {
		kfree(ha->tgt.tgt_vp_map);
		return -ENOMEM;
	}
	return 0;
}

void
qlt_mem_free(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.atio_ring) {
		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
		    ha->tgt.atio_dma);
	}
	kfree(ha->tgt.tgt_vp_map);
}

/*
 * Update the per-HW vp_idx -> vha table and the 24-bit port id -> vha
 * btree used to locate the right host for incoming frames.
 * vport_slock to be held by the caller.
 */
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
	void *slot;
	u32 key;
	int rc;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	key = vha->d_id.b24;

	switch (cmd) {
	case SET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
		slot = btree_lookup32(&vha->hw->tgt.host_map, key);
		if (!slot) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018,
			    "Save vha in host_map %p %06x\n", vha, key);
			rc = btree_insert32(&vha->hw->tgt.host_map,
				key, vha, GFP_ATOMIC);
			if (rc)
				ql_log(ql_log_info, vha, 0xd03e,
				    "Unable to insert s_id into host_map: %06x\n",
				    key);
			return;
		}
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
		    "replace existing vha in host_map %p %06x\n", vha, key);
		btree_update32(&vha->hw->tgt.host_map, key, vha);
		break;
	case RESET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
		   "clear vha in host_map %p %06x\n", vha, key);
		slot = btree_lookup32(&vha->hw->tgt.host_map, key);
		if (slot)
			btree_remove32(&vha->hw->tgt.host_map, key);
		vha->d_id.b24 = 0;
		break;
	}
}

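/*
 * Install or re-key the 24-bit port id -> vha mapping when a vport's
 * port id is assigned or changes.
 */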
void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
{
	if (!vha->d_id.b24) {
		vha->d_id = id;
		qlt_update_vp_map(vha, SET_AL_PA);
	} else if (vha->d_id.b24 != id.b24) {
		qlt_update_vp_map(vha, RESET_AL_PA);
		vha->d_id = id;
		qlt_update_vp_map(vha, SET_AL_PA);
	}
}

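/*
 * Translate the qlini_mode module parameter string into ql2x_ini_mode.
 * Returns true for a recognized value, false otherwise.
 */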
static int __init qlt_parse_ini_mode(void)
{
	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL;
	else
		return false;

	return true;
}

int __init qlt_init(void)
{
	int ret;

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd),
	    __alignof__(struct qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xd04b,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep",
	    sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t),
	    0, NULL);

	if (!qla_tgt_plogi_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_plogi_cachep failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_plogi_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_plogi_cachep:
	kmem_cache_destroy(qla_tgt_plogi_cachep);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	return ret;
}

void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_plogi_cachep);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
}