/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_def.h"
#include "qla_target.h"

static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
		"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and then on disabling target mode "
	"enabled back; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"dual\" - initiator mode will be enabled, and target mode can be "
	"activated when ready; "
	"\"enabled\" (default) - initiator mode will always stay enabled.");

static int ql_dm_tgt_ex_pct = 0;
module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql_dm_tgt_ex_pct,
	"For Dual Mode (qlini_mode=dual), this parameter determines "
	"the percentage of exchanges/cmds FW will allocate resources "
	"for Target mode.");

int ql2xuctrlirq = 1;
module_param(ql2xuctrlirq, int, 0644);
MODULE_PARM_DESC(ql2xuctrlirq,
    "Allows the user to control IRQ placement via smp_affinity. "
    "Valid with qlini_mode=disabled. "
    "1 (default): enabled");

int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

static int temp_sam_status = SAM_STAT_BUSY;

/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE      0   /* simple task attribute */
#define FCP_PTA_HEADQ       1   /* head of queue task attribute */
#define FCP_PTA_ORDERED     2   /* ordered task attribute */
#define FCP_PTA_ACA         4   /* auto. contingent allegiance */
#define FCP_PTA_MASK        7   /* mask for task attribute field */
#define FCP_PRI_SHIFT       3   /* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK  0x80        /* reserved bits in priority field */

/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under the HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation, at the time when
 * those functions are called:
 *
 *   - Either the context is IRQ and only the IRQ handler can modify HW data,
 *     including rings related fields,
 *
 *   - Or access to target mode variables from struct qla_tgt doesn't
 *     cross those functions' boundaries, except tgt_stop, which is
 *     additionally protected by irq_cmd_count.
 */
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt, uint8_t);
static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp,
	response_t *pkt);
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
static void qlt_clear_tgt_db(struct qla_tgt *tgt);
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked);
static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
	fc_port_t *fcport, bool local);
void qlt_unreg_sess(struct fc_port *sess);
static void qlt_24xx_handle_abts(struct scsi_qla_host *,
	struct abts_recv_from_24xx *);
static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
    uint16_t);

/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
static struct kmem_cache *qla_tgt_plogi_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);

static const char *prot_op_str(u32 prot_op)
{
	switch (prot_op) {
	case TARGET_PROT_NORMAL:	return "NORMAL";
	case TARGET_PROT_DIN_INSERT:	return "DIN_INSERT";
	case TARGET_PROT_DOUT_INSERT:	return "DOUT_INSERT";
	case TARGET_PROT_DIN_STRIP:	return "DIN_STRIP";
	case TARGET_PROT_DOUT_STRIP:	return "DOUT_STRIP";
	case TARGET_PROT_DIN_PASS:	return "DIN_PASS";
	case TARGET_PROT_DOUT_PASS:	return "DOUT_PASS";
	default:			return "UNKNOWN";
	}
}

/* This API intentionally takes dest as a parameter, rather than returning
 * an int value, to avoid the caller forgetting to issue wmb() after the store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
	*dest = atomic_inc_return(&base_vha->generation_tick);
	/* memory barrier */
	wmb();
}

/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);
		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}

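/*
 * Resolve the scsi_qla_host that owns a 24-bit FC destination ID: fast-path
 * match against this vha's own d_id, otherwise a btree lookup keyed on
 * domain/area/al_pa over the per-HA host map.
 */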
static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
	uint8_t *d_id)
{
	struct scsi_qla_host *host;
	uint32_t key = 0;

	if ((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) &&
	    (vha->d_id.b.al_pa == d_id[2]))
		return vha;

	key  = (uint32_t)d_id[0] << 16;
	key |= (uint32_t)d_id[1] <<  8;
	key |= (uint32_t)d_id[2];

	host = btree_lookup32(&vha->hw->tgt.host_map, key);
	if (!host)
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
		    "Unable to find host %06x\n", key);

	return host;
}

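/*
 * Resolve the scsi_qla_host for a virtual-port index via the per-HA
 * tgt_vp_map table.
 */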
static inline
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}

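/*
 * Track the number of commands in flight on this host under q_full_lock,
 * recording the high-water mark in qla_stats.
 */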
static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);

	vha->hw->tgt.num_pend_cmds++;
	if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
		vha->qla_stats.stat_max_pend_cmds =
			vha->hw->tgt.num_pend_cmds;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}
static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	vha->hw->tgt.num_pend_cmds--;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

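/*
 * Park an ATIO whose d_id cannot be resolved to a host yet (e.g. an NPIV
 * port that has not been mapped yet) on unknown_atio_list and schedule
 * delayed reprocessing; if the target is stopping, terminate the exchange
 * instead.
 */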
static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (tgt->tgt_stop) {
		ql_dbg(ql_dbg_async, vha, 0x502c,
		    "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped",
		    vha->vp_idx);
		goto out_term;
	}

	u = kzalloc(sizeof(*u), GFP_ATOMIC);
	if (u == NULL)
		goto out_term;

	u->vha = vha;
	memcpy(&u->atio, atio, sizeof(*atio));
	INIT_LIST_HEAD(&u->cmd_list);

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	schedule_delayed_work(&vha->unknown_atio_work, 1);

out:
	return;

out_term:
	qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0);
	goto out;
}

static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
	uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u, *t;
	scsi_qla_host_t *host;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	uint8_t queued = 0;

	list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
		if (u->aborted) {
			ql_dbg(ql_dbg_async, vha, 0x502e,
			    "Freeing unknown %s %p, because of Abort\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
			goto abort;
		}

		host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
		if (host != NULL) {
			ql_dbg(ql_dbg_async, vha, 0x502f,
			    "Requeuing unknown ATIO_TYPE7 %p\n", u);
			qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
		} else if (tgt->tgt_stop) {
			ql_dbg(ql_dbg_async, vha, 0x503a,
			    "Freeing unknown %s %p, because tgt is being stopped\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
		} else {
			ql_dbg(ql_dbg_async, vha, 0x503d,
			    "Reschedule u %p, vha %p, host %p\n", u, vha, host);
			if (!queued) {
				queued = 1;
				schedule_delayed_work(&vha->unknown_atio_work,
				    1);
			}
			continue;
		}

abort:
		spin_lock_irqsave(&vha->cmd_list_lock, flags);
		list_del(&u->cmd_list);
		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
		kfree(u);
	}
}

void qlt_unknown_atio_work_fn(struct work_struct *work)
{
	struct scsi_qla_host *vha = container_of(to_delayed_work(work),
	    struct scsi_qla_host, unknown_atio_work);

	qlt_try_to_dequeue_unknown_atios(vha, 0);
}

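/*
 * Fan an incoming ATIO-queue entry (ATIO_TYPE7, IMMED_NOTIFY, VP report or
 * ABTS) out to the vha that owns it, selected by d_id or vp_index.
 */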
static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
		"%s: qla_target(%d): type %x ox_id %04x\n",
		__func__, vha->vp_idx, atio->u.raw.entry_type,
		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id[0],
			    atio->u.isp24.fcp_hdr.d_id[1],
			    atio->u.isp24.fcp_hdr.d_id[2]);


			qlt_queue_unknown_atio(vha, atio, ha_locked);
			break;
		}
		if (unlikely(!list_empty(&vha->unknown_atio_list)))
			qlt_try_to_dequeue_unknown_atios(vha, ha_locked);

		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		qlt_issue_marker(vha, ha_locked);

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case VP_RPT_ID_IOCB_TYPE:
		qla24xx_report_id_acquisition(vha,
			(struct vp_rpt_id_entry_24xx *)atio);
		break;

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
			(struct abts_recv_from_24xx *)atio;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
			entry->vp_index);
		unsigned long flags;

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe00a,
			    "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		if (!ha_locked)
			spin_lock_irqsave(&host->hw->hardware_lock, flags);
		qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
		break;
	}

	/* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return false;
}

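/*
 * Route a response-queue entry to the vha addressed by its vp_index,
 * falling back to the base vha when no virtual port is specified.
 */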
void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_CRC2:
		ql_dbg(ql_dbg_tgt, vha, 0xe073,
			"qla_target(%d):%s: CRC2 Response pkt\n",
			vha->vp_idx, __func__);
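		/* fall through */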
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (0xFF != entry->u.isp24.vp_index) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RESP_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	default:
		qlt_response_pkt(vha, rsp, pkt);
		break;
	}

}

/*
 * All qlt_plogi_ack_t operations are protected by hardware_lock
 */
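/* Queue sending of a notify-ack (PLOGI/PRLI/LOGO) to the driver work thread. */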
static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	struct qla_work_evt *e;
	e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.nack.fcport = fcport;
	e->u.nack.type = type;
	memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp));
	return qla2x00_post_work(vha, e);
}

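/*
 * Completion callback for NACK srbs: updates the fcport's firmware login
 * state under tgt.sess_lock and, after a PRLI NACK, kicks off fcport
 * update or GPSC work for the now logged-in initiator.
 */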
static
void qla2x00_async_nack_sp_done(void *s, int res)
{
	struct srb *sp = (struct srb *)s;
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0x20f2,
	    "Async done-%s res %x %8phC  type %d\n",
	    sp->name, res, sp->fcport->port_name, sp->type);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	sp->fcport->flags &= ~FCF_ASYNC_SENT;
	sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset;

	switch (sp->type) {
	case SRB_NACK_PLOGI:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
		sp->fcport->logout_on_delete = 1;
		sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
		break;

	case SRB_NACK_PRLI:
		sp->fcport->fw_login_state = DSC_LS_PRLI_COMP;
		sp->fcport->deleted = 0;

		if (!sp->fcport->login_succ &&
		    !IS_SW_RESV_ADDR(sp->fcport->d_id)) {
			sp->fcport->login_succ = 1;

			vha->fcport_count++;

			if (!IS_IIDMA_CAPABLE(vha->hw) ||
			    !vha->hw->flags.gpsc_supported) {
				ql_dbg(ql_dbg_disc, vha, 0x20f3,
				    "%s %d %8phC post upd_fcport fcp_cnt %d\n",
				    __func__, __LINE__,
				    sp->fcport->port_name,
				    vha->fcport_count);

				qla24xx_post_upd_fcport_work(vha, sp->fcport);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x20f5,
				    "%s %d %8phC post gpsc fcp_cnt %d\n",
				    __func__, __LINE__,
				    sp->fcport->port_name,
				    vha->fcport_count);

				qla24xx_post_gpsc_work(vha, sp->fcport);
			}
		}
		break;

	case SRB_NACK_LOGO:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
		qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE);
		break;
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp->free(sp);
}

int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	char *c = NULL;

	fcport->flags |= FCF_ASYNC_SENT;
	switch (type) {
	case SRB_NACK_PLOGI:
		fcport->fw_login_state = DSC_LS_PLOGI_PEND;
		c = "PLOGI";
		break;
	case SRB_NACK_PRLI:
		fcport->fw_login_state = DSC_LS_PRLI_PEND;
		fcport->deleted = 0;
		c = "PRLI";
		break;
	case SRB_NACK_LOGO:
		fcport->fw_login_state = DSC_LS_LOGO_PEND;
		c = "LOGO";
		break;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	sp->type = type;
	sp->name = "nack";

	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);

	sp->u.iocb_cmd.u.nack.ntfy = ntfy;

	sp->done = qla2x00_async_nack_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x20f4,
	    "Async-%s %8phC hndl %x %s\n",
	    sp->name, fcport->port_name, sp->handle, c);

	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

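/*
 * Process a queued QLA_EVT_NACK event: for PRLI, create the target session
 * first (dropping its extra kref), then send the actual notify-ack.
 */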
void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
	fc_port_t *t;
	unsigned long flags;

	switch (e->u.nack.type) {
	case SRB_NACK_PRLI:
		mutex_lock(&vha->vha_tgt.tgt_mutex);
		t = qlt_create_sess(vha, e->u.nack.fcport, 0);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		if (t) {
			ql_log(ql_log_info, vha, 0xd034,
			    "%s create sess success %p", __func__, t);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			/* create sess has an extra kref */
			vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		}
		break;
	}
	qla24xx_async_notify_ack(vha, e->u.nack.fcport,
	    (struct imm_ntfy_from_isp*)e->u.nack.iocb, e->u.nack.type);
}

void qla24xx_delete_sess_fn(struct work_struct *work)
{
	fc_port_t *fcport = container_of(work, struct fc_port, del_work);
	struct qla_hw_data *ha = fcport->vha->hw;
	unsigned long flags;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);

	if (fcport->se_sess) {
		ha->tgt.tgt_ops->shutdown_sess(fcport);
		ha->tgt.tgt_ops->put_sess(fcport);
	} else {
		qlt_unreg_sess(fcport);
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}

/*
 * Called from qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (fcport->disc_state == DSC_DELETE_PEND) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (!sess->se_sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	} else {
		if (fcport->fw_login_state == DSC_LS_PRLI_COMP) {
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x2107,
			    "%s: kref_get fail sess %8phC \n",
			    __func__, sess->port_name);
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
		    "qla_target(%u): %ssession for port %8phC "
		    "(loop ID %d) reappeared\n", vha->vp_idx,
		    sess->local ? "local " : "", sess->port_name, sess->loop_id);

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
		    "Reappeared sess %p\n", sess);

		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
		    fcport->loop_id,
		    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
	}

	if (sess && sess->local) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name, sess->loop_id);
		sess->local = 0;
	}
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}

/*
 * This is a zero-based ref-counting solution, since hardware_lock
 * guarantees that ref_count is not modified concurrently.
 * Upon successful return the content of iocb is undefined
 */
static struct qlt_plogi_ack_t *
qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
		       struct imm_ntfy_from_isp *iocb)
{
	struct qlt_plogi_ack_t *pla;

	list_for_each_entry(pla, &vha->plogi_ack_list, list) {
		if (pla->id.b24 == id->b24) {
			qlt_send_term_imm_notif(vha, &pla->iocb, 1);
			memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
			return pla;
		}
	}

	pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
	if (!pla) {
		ql_dbg(ql_dbg_async, vha, 0x5088,
		       "qla_target(%d): Allocation of plogi_ack failed\n",
		       vha->vp_idx);
		return NULL;
	}

	memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
	pla->id = *id;
	list_add_tail(&pla->list, &vha->plogi_ack_list);

	return pla;
}

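/*
 * Drop one reference on a deferred PLOGI ACK. On the final put the ACK is
 * actually sent (via SRB_NACK_PLOGI work), all fcport links to it are
 * cleared and the entry is freed.
 */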
void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
    struct qlt_plogi_ack_t *pla)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	port_id_t port_id;
	uint16_t loop_id;
	fc_port_t *fcport = pla->fcport;

	BUG_ON(!pla->ref_count);
	pla->ref_count--;

	if (pla->ref_count)
		return;

	ql_dbg(ql_dbg_disc, vha, 0x5089,
	    "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
	    " exch %#x ox_id %#x\n", iocb->u.isp24.port_name,
	    iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1],
	    iocb->u.isp24.port_id[0],
	    le16_to_cpu(iocb->u.isp24.nport_handle),
	    iocb->u.isp24.exchange_address, iocb->ox_id);

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area   = iocb->u.isp24.port_id[1];
	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	fcport->loop_id = loop_id;
	fcport->d_id = port_id;
	qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
	}

	list_del(&pla->list);
	kmem_cache_free(qla_tgt_plogi_cachep, pla);
}

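/*
 * Link a deferred PLOGI ACK into one of the session's plogi_link slots,
 * releasing whatever ACK was linked there before.
 */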
void
qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
    struct fc_port *sess, enum qlt_plogi_link_t link)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	/* Inc ref_count first because link might already be pointing at pla */
	pla->ref_count++;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
		"Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
		" s_id %02x:%02x:%02x, ref=%d pla %p link %d\n",
		sess, link, sess->port_name,
		iocb->u.isp24.port_name, iocb->u.isp24.port_id[2],
		iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
		pla->ref_count, pla, link);

	if (sess->plogi_link[link])
		qlt_plogi_ack_unref(vha, sess->plogi_link[link]);

	if (link == QLT_PLOGI_LINK_SAME_WWN)
		pla->fcport = sess;

	sess->plogi_link[link] = pla;
}

typedef struct {
	/* These fields must be initialized by the caller */
	port_id_t id;
	/*
	 * number of cmds dropped while we were waiting for
	 * initiator to ack LOGO initialize to 1 if LOGO is
	 * triggered by a command, otherwise, to 0
	 */
	int cmd_count;

	/* These fields are used by callee */
	struct list_head list;
} qlt_port_logo_t;

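/*
 * Send an explicit LOGO ELS to an initiator port. Concurrent requests for
 * the same port id are coalesced into the already pending entry on
 * logo_list by adding up their cmd_count.
 */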
static void
qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
{
	qlt_port_logo_t *tmp;
	int res;

	mutex_lock(&vha->vha_tgt.tgt_mutex);

	list_for_each_entry(tmp, &vha->logo_list, list) {
		if (tmp->id.b24 == logo->id.b24) {
			tmp->cmd_count += logo->cmd_count;
			mutex_unlock(&vha->vha_tgt.tgt_mutex);
			return;
		}
	}

	list_add_tail(&logo->list, &vha->logo_list);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	list_del(&logo->list);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
	    "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
	    logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
	    logo->cmd_count, res);
}

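/*
 * Deferred session teardown: sends a LOGO if required, releases the fabric
 * session, waits for an in-flight logout to complete, drops any linked
 * PLOGI ACKs and finally wakes up waiters on the tgt/fcport counters.
 */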
static void qlt_free_session_done(struct work_struct *work)
{
	struct fc_port *sess = container_of(work, struct fc_port,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	bool logout_started = false;
	struct event_arg ea;
	scsi_qla_host_t *base_vha;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
		"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
		" s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
		__func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
		sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa,
		sess->logout_on_delete, sess->keep_nport_handle,
		sess->send_els_logo);


	if (!IS_SW_RESV_ADDR(sess->d_id)) {
		if (sess->send_els_logo) {
			qlt_port_logo_t logo;

			logo.id = sess->d_id;
			logo.cmd_count = 0;
			qlt_send_first_logo(vha, &logo);
		}

		if (sess->logout_on_delete) {
			int rc;

			rc = qla2x00_post_async_logout_work(vha, sess, NULL);
			if (rc != QLA_SUCCESS)
				ql_log(ql_log_warn, vha, 0xf085,
				    "Schedule logo failed sess %p rc %d\n",
				    sess, rc);
			else
				logout_started = true;
		}
	}

	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	if (logout_started) {
		bool traced = false;

		while (!ACCESS_ONCE(sess->logout_completed)) {
			if (!traced) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
					"%s: waiting for sess %p logout\n",
					__func__, sess);
				traced = true;
			}
			msleep(100);
		}

		ql_dbg(ql_dbg_disc, vha, 0xf087,
		    "%s: sess %p logout completed\n", __func__, sess);
	}

	if (sess->logo_ack_needed) {
		sess->logo_ack_needed = 0;
		qla24xx_async_notify_ack(vha, sess,
			(struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
	}

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (sess->se_sess) {
		sess->se_sess = NULL;
		if (tgt && !IS_SW_RESV_ADDR(sess->d_id))
			tgt->sess_count--;
	}

	sess->disc_state = DSC_DELETED;
	sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
	sess->deleted = QLA_SESS_DELETED;
	sess->login_retry = vha->hw->login_retry_count;

	if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
		vha->fcport_count--;
		sess->login_succ = 0;
	}

	if (sess->chip_reset != ha->base_qpair->chip_reset)
		qla2x00_clear_loop_id(sess);

	if (sess->conflict) {
		sess->conflict->login_pause = 0;
		sess->conflict = NULL;
		if (!test_bit(UNLOADING, &vha->dpc_flags))
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
	}

	{
		struct qlt_plogi_ack_t *own =
		    sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
		struct qlt_plogi_ack_t *con =
		    sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
		struct imm_ntfy_from_isp *iocb;

		if (con) {
			iocb = &con->iocb;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
				 "se_sess %p / sess %p port %8phC is gone,"
				 " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
				 sess->se_sess, sess, sess->port_name,
				 own ? "releasing own PLOGI" : "no own PLOGI pending",
				 own ? own->ref_count : -1,
				 iocb->u.isp24.port_name, con->ref_count);
			qlt_plogi_ack_unref(vha, con);
			sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
			    "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
			    sess->se_sess, sess, sess->port_name,
			    own ? "releasing own PLOGI" :
			    "no own PLOGI pending",
			    own ? own->ref_count : -1);
		}

		if (own) {
			sess->fw_login_state = DSC_LS_PLOGI_PEND;
			qlt_plogi_ack_unref(vha, own);
			sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		}
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
	    "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
		sess, sess->port_name, vha->fcport_count);

	if (tgt && (tgt->sess_count == 0))
		wake_up_all(&tgt->waitQ);

	if (vha->fcport_count == 0)
		wake_up_all(&vha->fcport_waitQ);

	base_vha = pci_get_drvdata(ha->pdev);
	if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags))
		return;

	if (!tgt || !tgt->tgt_stop) {
		memset(&ea, 0, sizeof(ea));
		ea.event = FCME_DELETE_DONE;
		ea.fcport = sess;
		qla2x00_fcport_event_handler(vha, &ea);
	}
}

/* ha->tgt.sess_lock supposed to be held on entry */
void qlt_unreg_sess(struct fc_port *sess)
{
	struct scsi_qla_host *vha = sess->vha;

	ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
	    "%s sess %p for deletion %8phC\n",
	    __func__, sess, sess->port_name);

	if (sess->se_sess)
		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	qla2x00_mark_device_lost(vha, sess, 1, 1);

	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	sess->disc_state = DSC_DELETE_PEND;
	sess->last_rscn_gen = sess->rscn_gen;
	sess->last_login_gen = sess->login_gen;

	INIT_WORK(&sess->free_work, qlt_free_session_done);
	schedule_work(&sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);

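/*
 * Handle a reset notification from the firmware: a global event (loop_id
 * 0xFFFF) clears the whole session database, otherwise the addressed
 * session gets the requested task management function.
 */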
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	unsigned long flags;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
		/* Global event */
		atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	} else {
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
}

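/*
 * If the session predates the last chip reset its firmware state is gone,
 * so clear the stale login/logout bookkeeping.
 */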
static void qla24xx_chk_fcp_state(struct fc_port *sess)
{
	if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) {
		sess->logout_on_delete = 0;
		sess->logo_ack_needed = 0;
		sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
		sess->scan_state = 0;
	}
}

/* ha->tgt.sess_lock supposed to be held on entry */
void qlt_schedule_sess_for_deletion(struct fc_port *sess,
	bool immediate)
{
	struct qla_tgt *tgt = sess->tgt;

	if (sess->disc_state == DSC_DELETE_PEND)
		return;

	if (sess->disc_state == DSC_DELETED) {
		if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
			wake_up_all(&tgt->waitQ);
		if (sess->vha->fcport_count == 0)
			wake_up_all(&sess->vha->fcport_waitQ);

		if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
			!sess->plogi_link[QLT_PLOGI_LINK_CONFLICT])
			return;
	}

	sess->disc_state = DSC_DELETE_PEND;

	if (sess->deleted == QLA_SESS_DELETED)
		sess->logout_on_delete = 0;

	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	qla24xx_chk_fcp_state(sess);

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion\n", sess);

	schedule_work(&sess->del_work);
}

void qlt_schedule_sess_for_deletion_lock(struct fc_port *sess)
{
	unsigned long flags;
	struct qla_hw_data *ha = sess->vha->hw;
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	qlt_schedule_sess_for_deletion(sess, 1);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}

/* ha->tgt.sess_lock supposed to be held on entry */
static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
	struct fc_port *sess;
	scsi_qla_host_t *vha = tgt->vha;

	list_for_each_entry(sess, &vha->vp_fcports, list) {
		if (sess->se_sess)
			qlt_schedule_sess_for_deletion(sess, 1);
	}

	/* At this point tgt could be already dead */
}

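/*
 * Translate an FC s_id into the firmware loop_id by fetching and walking
 * the GID list of logged-in ports.
 */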
static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list;
	char *id_iter;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -EBUSY;
		goto out_free_id_list;
	}

	id_iter = (char *)gid_list;
	res = -ENOENT;
	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid = (struct gid_list_info *)id_iter;
		if ((gid->al_pa == s_id[2]) &&
		    (gid->area == s_id[1]) &&
		    (gid->domain == s_id[0])) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		id_iter += ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}

/*
 * Adds an extra ref to allow dropping the hw lock after adding sess to the list.
 * Caller must put it.
 */
static struct fc_port *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (vha->vha_tgt.qla_tgt->tgt_stop)
		return NULL;

	if (fcport->se_sess) {
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f6,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}
		return fcport;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->local = local;

	/*
	 * Under normal circumstances we want to logout from firmware when
	 * session eventually ends and release corresponding nport handle.
	 * In the exception cases (e.g. when new PLOGI is waiting) corresponding
	 * code will adjust these flags as necessary.
	 */
	sess->logout_on_delete = 1;
	sess->keep_nport_handle = 0;
	sess->logout_completed = 0;

	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess) < 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015,
		    "(%d) %8phC check_initiator_node_acl failed\n",
		    vha->vp_idx, fcport->port_name);
		return NULL;
	} else {
		kref_init(&fcport->sess_kref);
		/*
		 * Take an extra reference to ->sess_kref here to handle
		 * fc_port access across ->tgt.sess_lock reacquire.
		 */
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f7,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (!IS_SW_RESV_ADDR(sess->d_id))
			vha->vha_tgt.qla_tgt->sess_count++;

		qlt_do_generation_tick(vha, &sess->generation);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p se_sess %p  to tgt %p sess_count %d\n",
	    sess, sess->se_sess, vha->vha_tgt.qla_tgt,
	    vha->vha_tgt.qla_tgt->sess_count);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ?  "local " : "", fcport->port_name,
	    fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area,
	    sess->d_id.b.al_pa, sess->conf_compl_supported ?  "" : "not ");

	return sess;
}

/*
 * max_gen - specifies maximum session generation
 * at which this deletion request is still valid
 */
void
qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt)
		return;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}
	if (!sess->se_sess) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}

	if (max_gen - sess->generation < 0) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
		    "Ignoring stale deletion request for se_sess %p / sess %p"
		    " for port %8phC, req_gen %d, sess_gen %d\n",
		    sess->se_sess, sess, sess->port_name, max_gen,
		    sess->generation);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

	sess->local = 1;
	qlt_schedule_sess_for_deletion(sess, false);
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}

static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;
	int res;
	/*
	 * We need to protect against a race where tgt is freed before or
	 * inside wake_up()
	 */
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
	    "tgt %p, sess_count=%d\n",
	    tgt, tgt->sess_count);
	res = (tgt->sess_count == 0);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	return res;
}

/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&qla_tgt_mutex);
	if (!vha->fc_vport) {
		struct Scsi_Host *sh = vha->host;
		struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
		bool npiv_vports;

		spin_lock_irqsave(sh->host_lock, flags);
		npiv_vports = (fc_host->npiv_vports_inuse);
		spin_unlock_irqrestore(sh->host_lock, flags);

		if (npiv_vports) {
			mutex_unlock(&qla_tgt_mutex);
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
			    "NPIV is in use. Can not stop target\n");
			return -EPERM;
		}
	}
	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we can still get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);

	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));

	/* Big hammer */
	if (!ha->flags.host_shutting_down &&
	    (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);

/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}
	if (!tgt->tgt_stop) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
		    "%s: phase1 stop is not completed\n", __func__);
		dump_stack();
		return;
	}

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
	    tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);

/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;
	void *node;
	u64 key = 0;
	u16 i;
	struct qla_qpair_hint *h;

	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stop &&
	    !tgt->tgt_stopped)
		qlt_stop_phase1(tgt);

	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
		unsigned long flags;

		h = &tgt->qphints[i];
		if (h->qpair) {
			spin_lock_irqsave(h->qpair->qp_lock_ptr, flags);
			list_del(&h->hint_elem);
			spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags);
			h->qpair = NULL;
		}
	}
	kfree(tgt->qphints);

	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
		btree_remove64(&tgt->lun_qpair_map, key);

	btree_destroy64(&tgt->lun_qpair_map);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	if (!ha->flags.fw_started)
		return;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE;
	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, qpair->req);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
	    ha, abts, status);

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair,
	    NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
	} else {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct abts_resp_from_24xx_fw *entry)
{
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(
	    vha->hw->base_qpair, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	/*
	 * On entry we have the firmware's response to the ABTS response we
	 * generated earlier, so the ID fields in it are already reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	ctio->u.status1.flags = cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
					    CTIO7_FLAGS_TERMINATE);
	ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);

	qlt_24xx_send_abts_resp(vha->hw->base_qpair,
	    (struct abts_recv_from_24xx *)entry,
	    FCP_TMF_CMPL, true);
}

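/*
 * Look up a not-yet-dispatched command by its exchange address and mark
 * it aborted. Three driver-side lists are searched: commands still
 * waiting for session setup, ATIOs from unknown ports, and commands
 * already queued to the core. Returns 1 if a match was marked, 0 if the
 * tag was not found.
 */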
static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;
	unsigned long flags;

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
		if (tag == op->atio.u.isp24.exchange_addr) {
			op->aborted = true;
			spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
			return 1;
		}
	}

	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
		if (tag == op->atio.u.isp24.exchange_addr) {
			op->aborted = true;
			spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
			return 1;
		}
	}

	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		if (tag == cmd->atio.u.isp24.exchange_addr) {
			cmd->aborted = 1;
			spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
			return 1;
		}
	}
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	return 0;
}

/* drop cmds for the given lun
 * XXX only looks for cmds on the port through which lun reset was received
 * XXX does not go through the lists of other ports (which may have cmds
 *     for the same lun)
 */
static void abort_cmds_for_lun(struct scsi_qla_host *vha,
			        u64 lun, uint8_t *s_id)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;
	uint32_t key;
	unsigned long flags;

	key = sid_to_key(s_id);
	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
		uint32_t op_key;
		u64 op_lun;

		op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
		op_lun = scsilun_to_int(
			(struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
		if (op_key == key && op_lun == lun)
			op->aborted = true;
	}

	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
		uint32_t op_key;
		u64 op_lun;

		op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
		op_lun = scsilun_to_int(
			(struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
		if (op_key == key && op_lun == lun)
			op->aborted = true;
	}

	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		uint32_t cmd_key;
		u64 cmd_lun;

		cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
		cmd_lun = scsilun_to_int(
			(struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
		if (cmd_key == key && cmd_lun == lun)
			cmd->aborted = 1;
	}
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
}

/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct fc_port *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_session *se_sess = sess->se_sess;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct qla_tgt_cmd *cmd;
	struct se_cmd *se_cmd;
	int rc;
	bool found_lun = false;
	unsigned long flags;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
		if (se_cmd->tag == abts->exchange_addr_to_abort) {
			found_lun = true;
			break;
		}
	}
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

	/* cmd not in LIO lists, look in qla list */
	if (!found_lun) {
		if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
			/* send TASK_ABORT response immediately */
			qlt_24xx_send_abts_resp(ha->base_qpair, abts,
			    FCP_TMF_CMPL, false);
			return 0;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081,
			    "unable to find cmd in driver or LIO for tag 0x%x\n",
			    abts->exchange_addr_to_abort);
			return -ENOENT;
		}
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->tmr_func = QLA_TGT_ABTS;
	mcmd->qpair = ha->base_qpair;

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, cmd->unpacked_lun, mcmd->tmr_func,
	    abts->exchange_addr_to_abort);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
		    "qla_target(%d):  tgt_ops->handle_tmr()"
		    " failed: %d", vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	uint32_t tag = abts->exchange_addr_to_abort;
	uint8_t s_id[3];
	int rc;
	unsigned long flags;

	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
	    abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	s_id[0] = abts->fcp_hdr_le.s_id[2];
	s_id[1] = abts->fcp_hdr_le.s_id[1];
	s_id[2] = abts->fcp_hdr_le.s_id[0];
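	/*
	 * fcp_hdr_le carries the S_ID in little-endian byte order; the
	 * swap above restores the big-endian domain/area/al_pa layout
	 * that find_sess_by_s_id() expects.
	 */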

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existent session\n",
		    vha->vp_idx);
		rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));

		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		if (rc != 0) {
			qlt_24xx_send_abts_resp(ha->base_qpair, abts,
			    FCP_TMF_REJECTED, false);
		}
		return;
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	if (sess->deleted) {
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}

	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
	struct scsi_qla_host *ha = qpair->vha;
	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
	struct ctio7_to_24xx *ctio;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt, ha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x)\n",
	    ha, atio, resp_code);

	ctio = (struct ctio7_to_24xx *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", ha->vp_idx, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = mcmd->sess->loop_id;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = ha->vp_idx;
	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) |
		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
	ctio->u.status1.flags = cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(temp);
	ctio->u.status1.scsi_status =
	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
	ctio->u.status1.response_len = cpu_to_le16(8);
	ctio->u.status1.sense_data[0] = resp_code;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(ha, qpair->req);
}

void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire
 */
void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
    uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
{
	struct atio_from_isp *atio = &cmd->atio;
	struct ctio7_to_24xx *ctio;
	uint16_t temp;
	struct scsi_qla_host *vha = cmd->vha;

	ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
	    "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
	    "sense_key=%02x, asc=%02x, ascq=%02x",
	    vha, atio, scsi_status, sense_key, asc, ascq);

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (!ctio) {
		ql_dbg(ql_dbg_async, vha, 0x3067,
		    "qla2x00t(%ld): %s failed: unable to allocate request packet",
		    vha->host_no, __func__);
		goto out;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE;
	ctio->nport_handle = cmd->sess->loop_id;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) |
	    CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
	ctio->u.status1.flags = cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(temp);
	ctio->u.status1.scsi_status =
	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status);
	ctio->u.status1.response_len = cpu_to_le16(18);
	ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));

	if (ctio->u.status1.residual != 0)
		ctio->u.status1.scsi_status |=
		    cpu_to_le16(SS_RESIDUAL_UNDER);

	/* Response code and sense key */
	put_unaligned_le32(((0x70 << 24) | (sense_key << 8)),
	    (&ctio->u.status1.sense_data)[0]);
	/* Additional sense length */
	put_unaligned_le32(0x0a, (&ctio->u.status1.sense_data)[1]);
	/* ASC and ASCQ */
	put_unaligned_le32(((asc << 24) | (ascq << 16)),
	    (&ctio->u.status1.sense_data)[3]);
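	/*
	 * The three 32-bit stores above build fixed-format sense data
	 * (response code 0x70, sense key, ASC/ASCQ) in place. Each dword
	 * of sense_data is stored byte-swapped, matching the
	 * cpu_to_be32() handling of sense buffers elsewhere in this file.
	 */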

	/* Memory Barrier */
	wmb();

	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);

out:
	return;
}

/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct qla_qpair *qpair = mcmd->qpair;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);

	if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) {
		/*
		 * Either the port is not online or this request was from
		 * a previous life; just abort the processing.
		 */
		ql_dbg(ql_dbg_async, vha, 0xe100,
			"RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
			vha->flags.online, qla2x00_reset_active(vha),
			mcmd->reset_count, qpair->chip_reset);
		ha->tgt.tgt_ops->free_mcmd(mcmd);
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
		return;
	}

	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
		if (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
		    ELS_LOGO ||
		    mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
		    ELS_PRLO ||
		    mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
		    ELS_TPRLO) {
			ql_dbg(ql_dbg_disc, vha, 0x2106,
			    "TM response logo %phC status %#x state %#x",
			    mcmd->sess->port_name, mcmd->fc_tm_rsp,
			    mcmd->flags);
			qlt_schedule_sess_for_deletion_lock(mcmd->sess);
		} else {
			qlt_send_notify_ack(vha->hw->base_qpair,
			    &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
		}
	} else {
		if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
			qlt_24xx_send_abts_resp(qpair, &mcmd->orig_iocb.abts,
			    mcmd->fc_tm_rsp, false);
		else
			qlt_24xx_send_task_mgmt_ctio(qpair, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1.  The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here.
	 */
	ha->tgt.tgt_ops->free_mcmd(mcmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);

/* No locks */
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = pci_map_sg(cmd->qpair->pdev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

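	/*
	 * A CTIO7 carries at most datasegs_per_cmd data segments; the
	 * remainder spills into continuation IOCBs of datasegs_per_cont
	 * segments each, hence the DIV_ROUND_UP() below.
	 */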
	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
		/*
		 * If greater than four sg entries then we need to allocate
		 * the continuation entries
		 */
		if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
			prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
			prm->tgt->datasegs_per_cmd,
			prm->tgt->datasegs_per_cont);
	} else {
		/* DIF */
		if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
		    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
			prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
			prm->tot_dsds = prm->seg_cnt;
		} else
			prm->tot_dsds = prm->seg_cnt;

		if (cmd->prot_sg_cnt) {
			prm->prot_sg      = cmd->prot_sg;
			prm->prot_seg_cnt = pci_map_sg(cmd->qpair->pdev,
				cmd->prot_sg, cmd->prot_sg_cnt,
				cmd->dma_data_direction);
			if (unlikely(prm->prot_seg_cnt == 0))
				goto out_err;

			if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
			    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
				/* DIF bundling is not supported here */
				prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
								cmd->blk_sz);
				prm->tot_dsds += prm->prot_seg_cnt;
			} else
				prm->tot_dsds += prm->prot_seg_cnt;
		}
	}

	return 0;

out_err:
	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}

static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	if (!cmd->sg_mapped)
		return;

	qpair = cmd->qpair;

	pci_unmap_sg(qpair->pdev, cmd->sg, cmd->sg_cnt,
	    cmd->dma_data_direction);
	cmd->sg_mapped = 0;

	if (cmd->prot_sg_cnt)
		pci_unmap_sg(qpair->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
			cmd->dma_data_direction);

	if (!cmd->ctx)
		return;
	ha = vha->hw;
	if (cmd->ctx_dsd_alloced)
		qla2x00_clean_dsd_pool(ha, cmd->ctx);

	dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
}

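/*
 * Reserve req_cnt entries on the request ring, refreshing the cached
 * free count from the firmware's out-pointer only when it looks too
 * small. A sketch of the free-space math, for a ring of length L with
 * producer index ring_index and consumer index cnt:
 *
 *	free = (ring_index < cnt) ? cnt - ring_index
 *				  : L - (ring_index - cnt);
 *
 * The "+ 2" slack appears intended to keep the ring from ever becoming
 * completely full, which would be indistinguishable from empty.
 */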
static int qlt_check_reserve_free_req(struct qla_qpair *qpair,
	uint32_t req_cnt)
{
	uint32_t cnt;
	struct req_que *req = qpair->req;

	if (req->cnt < (req_cnt + 2)) {
		cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (unlikely(req->cnt < (req_cnt + 2)))
			return -EAGAIN;
	}

	req->cnt -= req_cnt;

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static inline void *qlt_get_req_pkt(struct req_que *req)
{
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}
	return (cont_entry_t *)req->ring_ptr;
}

/* ha->hardware_lock supposed to be held on entry */
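/*
 * Circularly scan outstanding_cmds[] for a free slot, starting just past
 * the last handle issued. Handle 0 and QLA_TGT_SKIP_HANDLE are never
 * handed out; QLA_TGT_NULL_HANDLE reports exhaustion to the caller.
 */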
static inline uint32_t qlt_make_handle(struct qla_qpair *qpair)
{
	uint32_t h;
	int index;
	uint8_t found = 0;
	struct req_que *req = qpair->req;

	h = req->current_outstanding_cmd;

	for (index = 1; index < req->num_outstanding_cmds; index++) {
		h++;
		if (h == req->num_outstanding_cmds)
			h = 1;

		if (h == QLA_TGT_SKIP_HANDLE)
			continue;

		if (!req->outstanding_cmds[h]) {
			found = 1;
			break;
		}
	}

	if (found) {
		req->current_outstanding_cmd = h;
	} else {
		ql_dbg(ql_dbg_io, qpair->vha, 0x305b,
		    "qla_target(%d): Ran out of empty cmd slots\n",
		    qpair->vha->vp_idx);
		h = QLA_TGT_NULL_HANDLE;
	}

	return h;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
	struct qla_tgt_prm *prm)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct atio_from_isp *atio = &prm->cmd->atio;
	uint16_t temp;
	struct scsi_qla_host *vha = prm->cmd->vha;

	pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = vha->vp_idx;

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;

	pkt->handle = MAKE_HANDLE(qpair->req->id, h);
	pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
	temp = atio->u.isp24.attr << 9;
	pkt->u.status0.flags |= cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.ox_id = cpu_to_le16(temp);
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there are enough request entries, so the lock need not be dropped.
 */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;

	/* Build continuation packets */
	while (prm->seg_cnt > 0) {
		cont_a64_entry_t *cont_pkt64 =
			(cont_a64_entry_t *)qlt_get_req_pkt(
			   prm->cmd->qpair->req);

		/*
		 * Make sure that none of the 64-bit specific fields of
		 * cont_pkt64 are used for 32-bit addressing; cast to
		 * (cont_entry_t *) for that.
		 */

		memset(cont_pkt64, 0, sizeof(*cont_pkt64));

		cont_pkt64->entry_count = 1;
		cont_pkt64->sys_define = 0;

		if (enable_64bit_addressing) {
			cont_pkt64->entry_type = CONTINUE_A64_TYPE;
			dword_ptr =
			    (uint32_t *)&cont_pkt64->dseg_0_address;
		} else {
			cont_pkt64->entry_type = CONTINUE_TYPE;
			dword_ptr =
			    (uint32_t *)&((cont_entry_t *)
				cont_pkt64)->dseg_0_address;
		}

		/* Load continuation entry data segments */
		for (cnt = 0;
		    cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
		    cnt++, prm->seg_cnt--) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_lo32
				(sg_dma_address(prm->sg)));
			if (enable_64bit_addressing) {
				*dword_ptr++ =
				    cpu_to_le32(pci_dma_hi32
					(sg_dma_address
					(prm->sg)));
			}
			*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

			prm->sg = sg_next(prm->sg);
		}
	}
}

/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there are enough request entries, so the lock need not be dropped.
 */
static void qlt_load_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

	/* Setup packet address segment pointer */
	dword_ptr = pkt24->u.status0.dseg_0_address;

	/* Set total data segment count */
	if (prm->seg_cnt)
		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

	if (prm->seg_cnt == 0) {
		/* No data transfer */
		*dword_ptr++ = 0;
		*dword_ptr = 0;
		return;
	}

	/* If scatter gather */

	/* Load command entry data segments */
	for (cnt = 0;
	    (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
	    cnt++, prm->seg_cnt--) {
		*dword_ptr++ =
		    cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
		if (enable_64bit_addressing) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_hi32(
				sg_dma_address(prm->sg)));
		}
		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

		prm->sg = sg_next(prm->sg);
	}

	qlt_load_cont_data_segments(prm, vha);
}

static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
{
	return cmd->bufflen > 0;
}

static void qlt_print_dif_err(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd;
	struct scsi_qla_host *vha;

	/* asc 0x10=dif error */
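	/*
	 * With ASC 0x10 the ASCQ identifies which T10 PI tag failed:
	 * 1 = guard, 2 = application tag, 3 = reference tag (per SBC).
	 */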
	if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) {
		cmd = prm->cmd;
		vha = cmd->vha;
		/* ASCQ */
		switch (prm->sense_buffer[13]) {
		case 1:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b,
			    "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		case 2:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c,
			    "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		case 3:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f,
			    "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		default:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe010,
			    "BE detected Dif ERR: lba[%llx|%lld] len[%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		}
		ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16);
	}
}

/*
 * Called without ha->hardware_lock held
 */
static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
	uint32_t *full_req_cnt)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;

	prm->cmd = cmd;
	prm->tgt = cmd->tgt;
	prm->pkt = NULL;
	prm->rq_result = scsi_status;
	prm->sense_buffer = &cmd->sense_buffer[0];
	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
	prm->sg = NULL;
	prm->seg_cnt = -1;
	prm->req_cnt = 1;
	prm->residual = 0;
	prm->add_status_pkt = 0;
	prm->prot_sg = NULL;
	prm->prot_seg_cnt = 0;
	prm->tot_dsds = 0;

	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
		if  (qlt_pci_map_calc_cnt(prm) != 0)
			return -EAGAIN;
	}

	*full_req_cnt = prm->req_cnt;

	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_io + ql_dbg_verbose, cmd->vha, 0x305c,
		    "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
		       prm->residual, se_cmd->tag,
		       se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		       cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_UNDER;
	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_io, cmd->vha, 0x305d,
		    "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
		       prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
		       se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_OVER;
	}

	if (xmit_type & QLA_TGT_XMIT_STATUS) {
		/*
		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
		 * ignored in *xmit_response() below
		 */
		if (qlt_has_data(cmd)) {
			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
			    (IS_FWI2_CAPABLE(cmd->vha->hw) &&
			    (prm->rq_result != 0))) {
				prm->add_status_pkt = 1;
				(*full_req_cnt)++;
			}
		}
	}

	return 0;
}

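/*
 * Explicit confirmation (FCP_CONF) is skipped on class 2 connections,
 * which already acknowledge at the link level; otherwise it is used only
 * when the initiator advertised support for it.
 */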
static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd,
    int sending_sense)
{
	if (cmd->qpair->enable_class_2)
		return 0;

	if (sending_sense)
		return cmd->conf_compl_supported;
	else
		return cmd->qpair->enable_explicit_conf &&
		    cmd->conf_compl_supported;
}

static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
	struct qla_tgt_prm *prm)
{
	prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
	    (uint32_t)sizeof(ctio->u.status1.sense_data));
	ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
	if (qlt_need_explicit_conf(prm->cmd, 0)) {
		ctio->u.status0.flags |= cpu_to_le16(
		    CTIO7_FLAGS_EXPLICIT_CONFORM |
		    CTIO7_FLAGS_CONFORM_REQ);
	}
	ctio->u.status0.residual = cpu_to_le32(prm->residual);
	ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
	if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
		int i;

		if (qlt_need_explicit_conf(prm->cmd, 1)) {
			if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) {
				ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
				    "Skipping EXPLICIT_CONFORM and "
				    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
				    "non GOOD status\n");
				goto skip_explicit_conf;
			}
			ctio->u.status1.flags |= cpu_to_le16(
			    CTIO7_FLAGS_EXPLICIT_CONFORM |
			    CTIO7_FLAGS_CONFORM_REQ);
		}
skip_explicit_conf:
		ctio->u.status1.flags &=
		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.scsi_status |=
		    cpu_to_le16(SS_SENSE_LEN_VALID);
		ctio->u.status1.sense_length =
		    cpu_to_le16(prm->sense_buffer_len);
		for (i = 0; i < prm->sense_buffer_len/4; i++)
			((uint32_t *)ctio->u.status1.sense_data)[i] =
				cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);

		qlt_print_dif_err(prm);

	} else {
		ctio->u.status1.flags &=
		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.sense_length = 0;
		memset(ctio->u.status1.sense_data, 0,
		    sizeof(ctio->u.status1.sense_data));
	}

	/* Sense with len > 24, is it possible ??? */
}

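/*
 * Decide whether the HBA should verify protection information for this
 * command, keyed off the ql2xenablehba_err_chk module parameter:
 * level >= 1 enables checking for wire-side insert/strip operations,
 * level >= 2 also covers pass-through I/O. DIN_INSERT/DOUT_STRIP are
 * always checked, presumably because the HBA generates those tags
 * itself.
 */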
static inline int
qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		if (ql2xenablehba_err_chk >= 1)
			return 1;
		break;
	case TARGET_PROT_DOUT_PASS:
	case TARGET_PROT_DIN_PASS:
		if (ql2xenablehba_err_chk >= 2)
			return 1;
		break;
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		return 1;
	default:
		break;
	}
	return 0;
}

static inline int
qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_STRIP:
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
	    return 1;
	default:
	    return 0;
	}
	return 0;
}

/*
 * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
 */
static void
qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
    uint16_t *pfw_prot_opts)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
	scsi_qla_host_t *vha = cmd->tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	uint32_t t32 = 0;

	/*
	 * Wait until the Mode Sense/Select commands (mode page 0Ah,
	 * subpage 2) have been implemented by TCM before the app tag
	 * is available. Look for modesense_handlers[].
	 */
	ctx->app_tag = 0;
	ctx->app_tag_mask[0] = 0x0;
	ctx->app_tag_mask[1] = 0x0;

	if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
			*pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
			*pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
	}

	t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);

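	/*
	 * Each 8-byte T10 PI tuple holds a 16-bit guard CRC, a 16-bit
	 * application tag, and a 32-bit reference tag; the ref_tag_mask
	 * set per protection type below tells the firmware which tag
	 * bytes to verify.
	 */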
	switch (se_cmd->prot_type) {
	case TARGET_DIF_TYPE0_PROT:
		/*
		 * No check for ql2xenablehba_err_chk, as it
		 * would be an I/O error if hba tag generation
		 * is not done.
		 */
		ctx->ref_tag = cpu_to_le32(lba);
		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	case TARGET_DIF_TYPE1_PROT:
	    /*
	     * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
	     * REF tag, and 16 bit app tag.
	     */
	    ctx->ref_tag = cpu_to_le32(lba);
	    if (!qla_tgt_ref_mask_check(se_cmd) ||
		!(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
		    *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
		    break;
	    }
	    /* enable ALL bytes of the ref tag */
	    ctx->ref_tag_mask[0] = 0xff;
	    ctx->ref_tag_mask[1] = 0xff;
	    ctx->ref_tag_mask[2] = 0xff;
	    ctx->ref_tag_mask[3] = 0xff;
	    break;
	case TARGET_DIF_TYPE2_PROT:
	    /*
	     * For TYPE 2 protection: 16 bit GUARD + 32 bit REF
	     * tag has to match LBA in CDB + N
	     */
	    ctx->ref_tag = cpu_to_le32(lba);
	    if (!qla_tgt_ref_mask_check(se_cmd) ||
		!(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
		    *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
		    break;
	    }
	    /* enable ALL bytes of the ref tag */
	    ctx->ref_tag_mask[0] = 0xff;
	    ctx->ref_tag_mask[1] = 0xff;
	    ctx->ref_tag_mask[2] = 0xff;
	    ctx->ref_tag_mask[3] = 0xff;
	    break;
	case TARGET_DIF_TYPE3_PROT:
	    /* For TYPE 3 protection: 16 bit GUARD only */
	    *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
	    ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
		ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
	    break;
	}
}

static inline int
qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
{
	uint32_t		*cur_dsd;
	uint32_t		transfer_length = 0;
	uint32_t		data_bytes;
	uint32_t		dif_bytes;
	uint8_t			bundling = 1;
	uint8_t			*clr_ptr;
	struct crc_context	*crc_ctx_pkt = NULL;
	struct qla_hw_data	*ha;
	struct ctio_crc2_to_fw	*pkt;
	dma_addr_t		crc_ctx_dma;
	uint16_t		fw_prot_opts = 0;
	struct qla_tgt_cmd	*cmd = prm->cmd;
	struct se_cmd		*se_cmd = &cmd->se_cmd;
	uint32_t h;
	struct atio_from_isp *atio = &prm->cmd->atio;
	struct qla_tc_param	tc;
	uint16_t t16;
	scsi_qla_host_t *vha = cmd->vha;

	ha = vha->hw;

	pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	ql_dbg(ql_dbg_tgt, vha, 0xe071,
		"qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
		vha->vp_idx, __func__, se_cmd, se_cmd->prot_op,
		prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);

	if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
	    (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
		bundling = 0;

	/* Compute dif len and adjust data len to include protection */
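	/*
	 * One 8-byte PI tuple per logical block: e.g. a 64 KiB buffer of
	 * 512-byte blocks carries 128 * 8 = 1024 bytes of protection data.
	 */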
	data_bytes = cmd->bufflen;
	dif_bytes  = (data_bytes / cmd->blk_sz) * 8;

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		transfer_length = data_bytes;
		if (cmd->prot_sg_cnt)
			data_bytes += dif_bytes;
		break;
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		transfer_length = data_bytes + dif_bytes;
		break;
	default:
		BUG();
		break;
	}

	if (!qlt_hba_err_chk_enabled(se_cmd))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
			fw_prot_opts |= PO_DIS_VALD_APP_ESC;
		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
			fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
	}

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_INSERT:
		fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_STRIP:
		fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		fw_prot_opts |= PO_MODE_DIF_PASS;
		/* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
		break;
	default:/* Normal Request */
		fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	/* ---- PKT ---- */
	/* Update entry type to indicate Command Type CRC_2 IOCB */
	pkt->entry_type  = CTIO_CRC2;
	pkt->entry_count = 1;
	pkt->vp_index = vha->vp_idx;

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;

	pkt->handle  = MAKE_HANDLE(qpair->req->id, h);
	pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr   = atio->u.isp24.exchange_addr;

	/* silence compile warning */
	t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->ox_id  = cpu_to_le16(t16);

	t16 = (atio->u.isp24.attr << 9);
	pkt->flags |= cpu_to_le16(t16);
	pkt->relative_offset = cpu_to_le32(prm->cmd->offset);

	/* Set transfer direction */
	if (cmd->dma_data_direction == DMA_TO_DEVICE)
		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
	else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);

	pkt->dseg_count = prm->tot_dsds;
	/* Fibre channel byte count */
	pkt->transfer_length = cpu_to_le32(transfer_length);

	/* ----- CRC context -------- */

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = cmd->ctx =
	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	/* Set handle */
	crc_ctx_pkt->handle = pkt->handle;

	qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);

	pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure bundling if we need to fetch the protection
		 * data with interleaved PCI accesses.
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count =
			cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size   = cpu_to_le16(cmd->blk_sz);
	crc_ctx_pkt->prot_opts  = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);

	memset((uint8_t *)&tc, 0 , sizeof(tc));
	tc.vha = vha;
	tc.blk_sz = cmd->blk_sz;
	tc.bufflen = cmd->bufflen;
	tc.sg = cmd->sg;
	tc.prot_sg = cmd->prot_sg;
	tc.ctx = crc_ctx_pkt;
	tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;

	/* Walks data segments */
	pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);

	if (!bundling && prm->prot_seg_cnt) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
			prm->tot_dsds, &tc))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
		(prm->tot_dsds - prm->prot_seg_cnt), &tc))
		goto crc_queuing_error;

	if (bundling && prm->prot_seg_cnt) {
		/* Walks dif segments */
		pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;

		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
			prm->prot_seg_cnt, &tc))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */
	qpair->req->outstanding_cmds[h] = NULL;

	return QLA_FUNCTION_FAILED;
}

/*
 * Callback to set up a response of xmit_type QLA_TGT_XMIT_DATA and/or
 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
 */
int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
	uint8_t scsi_status)
{
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_qpair *qpair = cmd->qpair;
	struct ctio7_to_24xx *pkt;
	struct qla_tgt_prm prm;
	uint32_t full_req_cnt = 0;
	unsigned long flags = 0;
	int res;

	if (cmd->sess && cmd->sess->deleted) {
		cmd->state = QLA_TGT_STATE_PROCESSED;
		if (cmd->sess->logout_completed)
			/* no need to terminate. FW already freed exchange. */
			qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
		else
			qlt_send_term_exchange(qpair, cmd, &cmd->atio, 0, 0);
		return 0;
	}

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n",
	    (xmit_type & QLA_TGT_XMIT_STATUS) ?
	    1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
	    &cmd->se_cmd, qpair->id);

	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
	    &full_req_cnt);
	if (unlikely(res != 0)) {
		return res;
	}

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);

	if (xmit_type == QLA_TGT_XMIT_STATUS)
		vha->tgt_counters.core_qla_snd_status++;
	else
		vha->tgt_counters.core_qla_que_buf++;

	if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) {
		/*
		 * Either the port is not online or this request was from
		 * a previous life; just abort the processing.
		 */
		cmd->state = QLA_TGT_STATE_PROCESSED;
		qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
		ql_dbg(ql_dbg_async, vha, 0xe101,
			"RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
			vha->flags.online, qla2x00_reset_active(vha),
			cmd->reset_count, qpair->chip_reset);
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
		return 0;
	}

	/* Does the F/W have IOCB slots for this request? */
	res = qlt_check_reserve_free_req(qpair, full_req_cnt);
	if (unlikely(res))
		goto out_unmap_unlock;

	if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
		res = qlt_build_ctio_crc2_pkt(qpair, &prm);
	else
		res = qlt_24xx_build_ctio_pkt(qpair, &prm);
	if (unlikely(res != 0)) {
		qpair->req->cnt += full_req_cnt;
		goto out_unmap_unlock;
	}

	pkt = (struct ctio7_to_24xx *)prm.pkt;

	if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
		pkt->u.status0.flags |=
		    cpu_to_le16(CTIO7_FLAGS_DATA_IN |
			CTIO7_FLAGS_STATUS_MODE_0);

		if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
			qlt_load_data_segments(&prm, vha);

		if (prm.add_status_pkt == 0) {
			if (xmit_type & QLA_TGT_XMIT_STATUS) {
				pkt->u.status0.scsi_status =
				    cpu_to_le16(prm.rq_result);
				pkt->u.status0.residual =
				    cpu_to_le32(prm.residual);
				pkt->u.status0.flags |= cpu_to_le16(
				    CTIO7_FLAGS_SEND_STATUS);
				if (qlt_need_explicit_conf(cmd, 0)) {
					pkt->u.status0.flags |=
					    cpu_to_le16(
						CTIO7_FLAGS_EXPLICIT_CONFORM |
						CTIO7_FLAGS_CONFORM_REQ);
				}
			}

		} else {
			/*
			 * We have already made sure that there is sufficient
			 * amount of request entries to not drop HW lock in
			 * req_pkt().
			 */
			struct ctio7_to_24xx *ctio =
				(struct ctio7_to_24xx *)qlt_get_req_pkt(
				    qpair->req);

			ql_dbg(ql_dbg_tgt, vha, 0x305e,
			    "Building additional status packet 0x%p.\n",
			    ctio);

			/*
			 * T10Dif: ctio_crc2_to_fw overlay ontop of
			 * ctio7_to_24xx
			 */
			memcpy(ctio, pkt, sizeof(*ctio));

			/* reset back to CTIO7 */
			ctio->entry_count = 1;
			ctio->entry_type = CTIO_TYPE7;
			ctio->dseg_count = 0;
			ctio->u.status1.flags &= ~cpu_to_le16(
			    CTIO7_FLAGS_DATA_IN);

			/* Real finish is ctio_m1's finish */
			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
			pkt->u.status0.flags |= cpu_to_le16(
			    CTIO7_FLAGS_DONT_RET_CTIO);

			/* qlt_24xx_init_ctio_to_isp will correct
			 * all necessary fields that are part of CTIO7.
			 * There should be no residual of CTIO-CRC2 data.
			 */
			qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
			    &prm);
			pr_debug("Status CTIO7: %p\n", ctio);
		}
	} else
		qlt_24xx_init_ctio_to_isp(pkt, &prm);


	cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
	cmd->cmd_sent_to_fw = 1;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return 0;

out_unmap_unlock:
	qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return res;
}
EXPORT_SYMBOL(qlt_xmit_response);

int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
{
	struct ctio7_to_24xx *pkt;
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_tgt *tgt = cmd->tgt;
	struct qla_tgt_prm prm;
	unsigned long flags = 0;
	int res = 0;
	struct qla_qpair *qpair = cmd->qpair;

	memset(&prm, 0, sizeof(prm));
	prm.cmd = cmd;
	prm.tgt = tgt;
	prm.sg = NULL;
	prm.req_cnt = 1;

	/* Calculate number of entries and segments required */
	if (qlt_pci_map_calc_cnt(&prm) != 0)
		return -EAGAIN;

	if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
	    (cmd->sess && cmd->sess->deleted)) {
		/*
		 * Either the port is not online or this request was from
		 * a previous life; just abort the processing.
		 */
		cmd->state = QLA_TGT_STATE_NEED_DATA;
		qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
		ql_dbg(ql_dbg_async, vha, 0xe102,
			"RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
			vha->flags.online, qla2x00_reset_active(vha),
			cmd->reset_count, qpair->chip_reset);
		return 0;
	}

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	/* Does the F/W have IOCB slots for this request? */
	res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
	if (res != 0)
		goto out_unlock_free_unmap;
	if (cmd->se_cmd.prot_op)
		res = qlt_build_ctio_crc2_pkt(qpair, &prm);
	else
		res = qlt_24xx_build_ctio_pkt(qpair, &prm);

	if (unlikely(res != 0)) {
		qpair->req->cnt += prm.req_cnt;
		goto out_unlock_free_unmap;
	}

	pkt = (struct ctio7_to_24xx *)prm.pkt;
	pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
	    CTIO7_FLAGS_STATUS_MODE_0);

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
		qlt_load_data_segments(&prm, vha);

	cmd->state = QLA_TGT_STATE_NEED_DATA;
	cmd->cmd_sent_to_fw = 1;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return res;

out_unlock_free_unmap:
	qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return res;
}
EXPORT_SYMBOL(qlt_rdy_to_xfer);


/*
 * It is assumed that either hardware_lock or the qpair lock is held.
 */
static void
qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
	struct ctio_crc_from_fw *sts)
{
	uint8_t		*ap = &sts->actual_dif[0];
	uint8_t		*ep = &sts->expected_dif[0];
	uint64_t	lba = cmd->se_cmd.t_task_lba;
	uint8_t scsi_status, sense_key, asc, ascq;
	unsigned long flags;
	struct scsi_qla_host *vha = cmd->vha;

	cmd->trc_flags |= TRC_DIF_ERR;

	cmd->a_guard   = be16_to_cpu(*(uint16_t *)(ap + 0));
	cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
	cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));

	cmd->e_guard   = be16_to_cpu(*(uint16_t *)(ep + 0));
	cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
	cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
	    "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);

	scsi_status = sense_key = asc = ascq = 0;
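	/*
	 * The a_ and e_ fields above hold the actual and expected 8-byte
	 * PI tuples reported by the firmware: guard CRC at offset 0, app
	 * tag at offset 2, ref tag at offset 4. Each mismatch below maps
	 * to ASC 0x10 with an ASCQ naming the failed tag.
	 */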
	/* check appl tag */
	if (cmd->e_app_tag != cmd->a_app_tag) {
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d,
		    "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
		    cmd->atio.u.isp24.fcp_hdr.ox_id);

		cmd->dif_err_code = DIF_ERR_APP;
		scsi_status = SAM_STAT_CHECK_CONDITION;
		sense_key = ABORTED_COMMAND;
		asc = 0x10;
		ascq = 0x2;
	}

	/* check ref tag */
	if (cmd->e_ref_tag != cmd->a_ref_tag) {
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e,
		    "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
		    cmd->atio.u.isp24.fcp_hdr.ox_id);

		cmd->dif_err_code = DIF_ERR_REF;
		scsi_status = SAM_STAT_CHECK_CONDITION;
		sense_key = ABORTED_COMMAND;
		asc = 0x10;
		ascq = 0x3;
		goto out;
	}

	/* check guard */
	if (cmd->e_guard != cmd->a_guard) {
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe012,
		    "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
		    cmd->atio.u.isp24.fcp_hdr.ox_id);

		cmd->dif_err_code = DIF_ERR_GRD;
		scsi_status = SAM_STAT_CHECK_CONDITION;
		sense_key = ABORTED_COMMAND;
		asc = 0x10;
		ascq = 0x1;
	}
out:
	switch (cmd->state) {
	case QLA_TGT_STATE_NEED_DATA:
		/* handle_data will load DIF error code  */
		cmd->state = QLA_TGT_STATE_DATA_IN;
		vha->hw->tgt.tgt_ops->handle_data(cmd);
		break;
	default:
		spin_lock_irqsave(&cmd->cmd_lock, flags);
		if (cmd->aborted) {
			spin_unlock_irqrestore(&cmd->cmd_lock, flags);
			vha->hw->tgt.tgt_ops->free_cmd(cmd);
			break;
		}
		spin_unlock_irqrestore(&cmd->cmd_lock, flags);

		qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc,
		    ascq);
		/* assume scsi status gets out on the wire.
		 * Will not wait for completion.
		 */
		vha->hw->tgt.tgt_ops->free_cmd(cmd);
		break;
	}
}
/* If hardware_lock held on entry, might drop it, then reacquire */
/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy)
{
	struct nack_to_isp *nack;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;

	ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
	    "Sending TERM ELS CTIO (ha=%p)\n", ha);

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (pkt == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe080,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			__constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}

	/* terminate */
	nack->u.isp24.flags |=
		__constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);

	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	qla2x00_start_iocbs(vha, vha->req);
	return ret;
}

static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked)
{
	unsigned long flags = 0;
	int rc;

	if (ha_locked) {
		rc = __qlt_send_term_imm_notif(vha, imm);

#if 0	/* Todo  */
		if (rc == -ENOMEM)
			qlt_alloc_qfull_cmd(vha, imm, 0, 0);
#else
		if (rc) {
		}
#endif
		goto done;
	}

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	rc = __qlt_send_term_imm_notif(vha, imm);

#if 0	/* Todo */
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, imm, 0, 0);
#endif

done:
	if (!ha_locked)
		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}

/*
 * If hardware_lock held on entry, might drop it, then reacquire.
 * This function sends the appropriate CTIO to ISP 2xxx or 24xx.
 */
static int __qlt_send_term_exchange(struct qla_qpair *qpair,
	struct qla_tgt_cmd *cmd,
	struct atio_from_isp *atio)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);

	pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL);
	if (pkt == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe050,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	if (cmd != NULL) {
		if (cmd->state < QLA_TGT_STATE_PROCESSED) {
			ql_dbg(ql_dbg_tgt, vha, 0xe051,
			    "qla_target(%d): Terminating cmd %p with "
			    "incorrect state %d\n", vha->vp_idx, cmd,
			    cmd->state);
		} else
			ret = 1;
	}

	vha->tgt_counters.num_term_xchg_sent++;
	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE;
	ctio24->u.status1.flags = cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio24->u.status1.ox_id = cpu_to_le16(temp);

	/* Most likely, it isn't needed */
	ctio24->u.status1.residual = get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]);
	if (ctio24->u.status1.residual != 0)
		ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	return ret;
}

static void qlt_send_term_exchange(struct qla_qpair *qpair,
	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
	int ul_abort)
{
	struct scsi_qla_host *vha;
	unsigned long flags = 0;
	int rc;

	/* why use different vha? NPIV */
	if (cmd)
		vha = cmd->vha;
	else
		vha = qpair->vha;

	if (ha_locked) {
		rc = __qlt_send_term_exchange(qpair, cmd, atio);
		if (rc == -ENOMEM)
			qlt_alloc_qfull_cmd(vha, atio, 0, 0);
		goto done;
	}
	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	rc = __qlt_send_term_exchange(qpair, cmd, atio);
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, atio, 0, 0);

done:
	if (cmd && !ul_abort && !cmd->aborted) {
		if (cmd->sg_mapped)
			qlt_unmap_sg(vha, cmd);
		vha->hw->tgt.tgt_ops->free_cmd(cmd);
	}

	if (!ha_locked)
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return;
}

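/*
 * Called when target mode (re)initializes: compute the exchange-leak
 * threshold from the current FW exchange count and free any stale q_full
 * commands, which were never handed to TCM.
 */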
static void qlt_init_term_exchange(struct scsi_qla_host *vha)
{
	struct list_head free_list;
	struct qla_tgt_cmd *cmd, *tcmd;

	vha->hw->tgt.leak_exchg_thresh_hold =
	    (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;

	cmd = tcmd = NULL;
	if (!list_empty(&vha->hw->tgt.q_full_list)) {
		INIT_LIST_HEAD(&free_list);
		list_splice_init(&vha->hw->tgt.q_full_list, &free_list);

		list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
			list_del(&cmd->cmd_list);
			/* This cmd was never sent to TCM.  There is no need
			 * to schedule free or call free_cmd
			 */
			qlt_free_cmd(cmd);
			vha->hw->tgt.num_qfull_cmds_alloc--;
		}
	}
	vha->hw->tgt.num_qfull_cmds_dropped = 0;
}

static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
{
	uint32_t total_leaked;

	total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;

	if (vha->hw->tgt.leak_exchg_thresh_hold &&
	    (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {

		ql_dbg(ql_dbg_tgt, vha, 0xe079,
		    "Chip reset due to exchange starvation: %d/%d.\n",
		    total_leaked, vha->hw->cur_fw_xcb_count);

		if (IS_P3P_TYPE(vha->hw))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
	}

}

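/*
 * Upper-layer abort entry point: mark the command aborted under cmd_lock
 * and terminate its exchange. Returns EIO if the command was already
 * aborted (e.g. XFER Rdy completion racing with a TCM TMR).
 */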
int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
{
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
	    "qla_target(%d): terminating exchange for aborted cmd=%p "
	    "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
	    se_cmd->tag);

	spin_lock_irqsave(&cmd->cmd_lock, flags);
	if (cmd->aborted) {
		spin_unlock_irqrestore(&cmd->cmd_lock, flags);
		/*
		 * It's normal to see 2 calls in this path:
		 *  1) XFER Rdy completion + CMD_T_ABORT
		 *  2) TCM TMR - drain_state_list
		 */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016,
		    "multiple abort. %p transport_state %x, t_state %x, "
		    "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
		    cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
		return EIO;
	}
	cmd->aborted = 1;
	cmd->trc_flags |= TRC_ABORT;
	spin_unlock_irqrestore(&cmd->cmd_lock, flags);

	qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1);
	return 0;
}
EXPORT_SYMBOL(qlt_abort_cmd);

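/*
 * Final release of a qla_tgt_cmd: unmap any SG lists still mapped and
 * return the tag to the session's percpu_ida tag pool.
 */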
void qlt_free_cmd(struct qla_tgt_cmd *cmd)
{
	struct fc_port *sess = cmd->sess;

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
	    "%s: se_cmd[%p] ox_id %04x\n",
	    __func__, &cmd->se_cmd,
	    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));

	BUG_ON(cmd->cmd_in_wq);

	if (cmd->sg_mapped)
		qlt_unmap_sg(cmd->vha, cmd);

	if (!cmd->q_full)
		qlt_decr_num_pend_cmds(cmd->vha);

	BUG_ON(cmd->sg_mapped);
	cmd->jiffies_at_free = get_jiffies_64();
	if (unlikely(cmd->free_sg))
		kfree(cmd->sg);

	if (!sess || !sess->se_sess) {
		WARN_ON(1);
		return;
	}
	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
}
EXPORT_SYMBOL(qlt_free_cmd);

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
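/*
 * Terminate the exchange for a failed CTIO unless the CTIO itself already
 * carried OF_TERM_EXCH; returns nonzero if a terminate was issued.
 */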
static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
	struct qla_tgt_cmd *cmd, uint32_t status)
{
	int term = 0;
	struct scsi_qla_host *vha = qpair->vha;

	if (cmd->se_cmd.prot_op)
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe013,
		    "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
		    "se_cmd=%p tag[%x] op %#x/%s",
		     cmd->lba, cmd->lba,
		     cmd->num_blks, &cmd->se_cmd,
		     cmd->atio.u.isp24.exchange_addr,
		     cmd->se_cmd.prot_op,
		     prot_op_str(cmd->se_cmd.prot_op));

	if (ctio != NULL) {
		struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
		term = !(c->flags &
		    cpu_to_le16(OF_TERM_EXCH));
	} else
		term = 1;

	if (term)
		qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);

	return term;
}


/* ha->hardware_lock supposed to be held on entry */
static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
	struct rsp_que *rsp, uint32_t handle, void *ctio)
{
	struct qla_tgt_cmd *cmd = NULL;
	struct req_que *req;
	int qid = GET_QID(handle);
	uint32_t h = handle & ~QLA_TGT_HANDLE_MASK;

	if (unlikely(h == QLA_TGT_SKIP_HANDLE))
		return NULL;

	if (qid == rsp->req->id) {
		req = rsp->req;
	} else if (vha->hw->req_q_map[qid]) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a,
		    "qla_target(%d): CTIO completion with different QID %d handle %x\n",
		    vha->vp_idx, rsp->id, handle);
		req = vha->hw->req_q_map[qid];
	} else {
		return NULL;
	}

	h &= QLA_CMD_HANDLE_MASK;

	if (h != QLA_TGT_NULL_HANDLE) {
		if (unlikely(h > req->num_outstanding_cmds)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe052,
			    "qla_target(%d): Wrong handle %x received\n",
			    vha->vp_idx, handle);
			return NULL;
		}

		cmd = (struct qla_tgt_cmd *)req->outstanding_cmds[h];
		if (unlikely(cmd == NULL)) {
			ql_dbg(ql_dbg_async, vha, 0xe053,
			    "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
				vha->vp_idx, handle, req->id, rsp->id);
			return NULL;
		}
		req->outstanding_cmds[h] = NULL;
	} else if (ctio != NULL) {
		/* We can't get loop ID from CTIO7 */
		ql_dbg(ql_dbg_tgt, vha, 0xe054,
		    "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
		    "support NULL handles\n", vha->vp_idx);
		return NULL;
	}

	return cmd;
}

/* hardware_lock should be held by caller. */
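/*
 * Flush one command during host reset: writes still waiting for data are
 * failed back to TCM via handle_data(), everything else is freed directly.
 */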
void
qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha = vha->hw;

	if (cmd->sg_mapped)
		qlt_unmap_sg(vha, cmd);

	/* TODO: fix debug message type and ids. */
	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
		ql_dbg(ql_dbg_io, vha, 0xff00,
		    "HOST-ABORT: state=PROCESSED.\n");
	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		cmd->write_data_transferred = 0;
		cmd->state = QLA_TGT_STATE_DATA_IN;

		ql_dbg(ql_dbg_io, vha, 0xff01,
		    "HOST-ABORT: state=DATA_IN.\n");

		ha->tgt.tgt_ops->handle_data(cmd);
		return;
	} else {
		ql_dbg(ql_dbg_io, vha, 0xff03,
		    "HOST-ABORT: state=BAD(%d).\n",
		    cmd->state);
		dump_stack();
	}

	cmd->trc_flags |= TRC_FLUSH;
	ha->tgt.tgt_ops->free_cmd(cmd);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
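/*
 * CTIO completion path: map the handle back to its command, classify the
 * completion status, then either finish the data phase via handle_data()
 * or free the command.
 */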
static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
    struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd;
	struct qla_tgt_cmd *cmd;
	struct qla_qpair *qpair = rsp->qpair;

	if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
		/* That could happen only in case of an error/reset/abort */
		if (status != CTIO_SUCCESS) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
			    "Intermediate CTIO received"
			    " (status %x)\n", status);
		}
		return;
	}

	cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
	if (cmd == NULL)
		return;

	se_cmd = &cmd->se_cmd;
	cmd->cmd_sent_to_fw = 0;

	qlt_unmap_sg(vha, cmd);

	if (unlikely(status != CTIO_SUCCESS)) {
		switch (status & 0xFFFF) {
		case CTIO_LIP_RESET:
		case CTIO_TARGET_RESET:
		case CTIO_ABORTED:
			/* driver request abort via Terminate exchange */
		case CTIO_TIMEOUT:
		case CTIO_INVALID_RX_ID:
			/* They are OK */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
			    "qla_target(%d): CTIO with "
			    "status %#x received, state %x, se_cmd %p, "
			    "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
			    "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
			    status, cmd->state, se_cmd);
			break;

		case CTIO_PORT_LOGGED_OUT:
		case CTIO_PORT_UNAVAILABLE:
		{
			int logged_out =
				(status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
			    "qla_target(%d): CTIO with %s status %x "
			    "received (state %x, se_cmd %p)\n", vha->vp_idx,
			    logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
			    status, cmd->state, se_cmd);

			if (logged_out && cmd->sess) {
				/*
				 * Session is already logged out, but we need
				 * to notify initiator, who's not aware of this
				 */
				cmd->sess->logout_on_delete = 0;
				cmd->sess->send_els_logo = 1;
				ql_dbg(ql_dbg_disc, vha, 0x20f8,
				    "%s %d %8phC post del sess\n",
				    __func__, __LINE__, cmd->sess->port_name);

				qlt_schedule_sess_for_deletion_lock(cmd->sess);
			}
			break;
		}
		case CTIO_DIF_ERROR: {
			struct ctio_crc_from_fw *crc =
				(struct ctio_crc_from_fw *)ctio;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
			    "qla_target(%d): CTIO with DIF_ERROR status %x "
			    "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
			    "expect_dif[0x%llx]\n",
			    vha->vp_idx, status, cmd->state, se_cmd,
			    *((u64 *)&crc->actual_dif[0]),
			    *((u64 *)&crc->expected_dif[0]));

			qlt_handle_dif_error(qpair, cmd, ctio);
			return;
		}
		default:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
			    "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			break;
		}

		/* "cmd->aborted" means
		 * cmd is already aborted/terminated, we don't
		 * need to terminate again.  The exchange is already
		 * cleaned up/freed at FW level.  Just cleanup at driver
		 * level.
		 */
		if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
		    (!cmd->aborted)) {
			cmd->trc_flags |= TRC_CTIO_ERR;
			if (qlt_term_ctio_exchange(qpair, ctio, cmd, status))
				return;
		}
	}

	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
		cmd->trc_flags |= TRC_CTIO_DONE;
	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		cmd->state = QLA_TGT_STATE_DATA_IN;

		if (status == CTIO_SUCCESS)
			cmd->write_data_transferred = 1;

		ha->tgt.tgt_ops->handle_data(cmd);
		return;
	} else if (cmd->aborted) {
		cmd->trc_flags |= TRC_CTIO_ABORTED;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
		  "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
	} else {
		cmd->trc_flags |= TRC_CTIO_STRANGE;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
		    "qla_target(%d): A command in state (%d) should "
		    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
	}

	if (unlikely(status != CTIO_SUCCESS) &&
		!cmd->aborted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
		dump_stack();
	}

	ha->tgt.tgt_ops->free_cmd(cmd);
}

static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
	uint8_t task_codes)
{
	int fcp_task_attr;

	switch (task_codes) {
	case ATIO_SIMPLE_QUEUE:
		fcp_task_attr = TCM_SIMPLE_TAG;
		break;
	case ATIO_HEAD_OF_QUEUE:
		fcp_task_attr = TCM_HEAD_TAG;
		break;
	case ATIO_ORDERED_QUEUE:
		fcp_task_attr = TCM_ORDERED_TAG;
		break;
	case ATIO_ACA_QUEUE:
		fcp_task_attr = TCM_ACA_TAG;
		break;
	case ATIO_UNTAGGED:
		fcp_task_attr = TCM_SIMPLE_TAG;
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
		    "qla_target: unknown task code %x, use ORDERED instead\n",
		    task_codes);
		fcp_task_attr = TCM_ORDERED_TAG;
		break;
	}

	return fcp_task_attr;
}

static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *,
					uint8_t *);
/*
 * Process context for I/O path into tcm_qla2xxx code
 */
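/*
 * Decode the FCP_CMND (CDB, data direction, task attribute, transfer
 * length) and submit the command to TCM via tgt_ops->handle_cmd();
 * terminate the exchange on any failure.
 */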
static void __qlt_do_work(struct qla_tgt_cmd *cmd)
{
	scsi_qla_host_t *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = cmd->sess;
	struct atio_from_isp *atio = &cmd->atio;
	unsigned char *cdb;
	unsigned long flags;
	uint32_t data_length;
	int ret, fcp_task_attr, data_dir, bidi = 0;
	struct qla_qpair *qpair = cmd->qpair;

	cmd->cmd_in_wq = 0;
	cmd->trc_flags |= TRC_DO_WORK;

	if (cmd->aborted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
		    "cmd with tag %u is aborted\n",
		    cmd->atio.u.isp24.exchange_addr);
		goto out_term;
	}

	spin_lock_init(&cmd->cmd_lock);
	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
	cmd->se_cmd.tag = atio->u.isp24.exchange_addr;

	if (atio->u.isp24.fcp_cmnd.rddata &&
	    atio->u.isp24.fcp_cmnd.wrdata) {
		bidi = 1;
		data_dir = DMA_TO_DEVICE;
	} else if (atio->u.isp24.fcp_cmnd.rddata)
		data_dir = DMA_FROM_DEVICE;
	else if (atio->u.isp24.fcp_cmnd.wrdata)
		data_dir = DMA_TO_DEVICE;
	else
		data_dir = DMA_NONE;

	fcp_task_attr = qlt_get_fcp_task_attr(vha,
	    atio->u.isp24.fcp_cmnd.task_attr);
	data_length = be32_to_cpu(get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]));

	ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
				          fcp_task_attr, data_dir, bidi);
	if (ret != 0)
		goto out_term;
	/*
	 * Drop extra session reference from qlt_handle_cmd_for_atio().
	 */
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	return;

out_term:
	ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
	/*
	 * cmd has not been sent to the target yet, so pass NULL as the second
	 * argument to qlt_send_term_exchange() and free the memory here.
	 */
	cmd->trc_flags |= TRC_DO_WORK_ERR;
	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);

	qlt_decr_num_pend_cmds(vha);
	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}

static void qlt_do_work(struct work_struct *work)
{
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
	scsi_qla_host_t *vha = cmd->vha;
	unsigned long flags;

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_del(&cmd->cmd_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	__qlt_do_work(cmd);
}

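/*
 * Reset the LUN-to-qpair map (e.g. after the user changes the number of
 * active qpairs); entries are re-created lazily by qlt_assign_qpair().
 */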
void qlt_clr_qp_table(struct scsi_qla_host *vha)
{
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	void *node;
	u64 key = 0;

	ql_log(ql_log_info, vha, 0x706c,
	    "User update Number of Active Qpairs %d\n",
	    ha->tgt.num_act_qpairs);

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);

	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
		btree_remove64(&tgt->lun_qpair_map, key);

	ha->base_qpair->lun_cnt = 0;
	for (key = 0; key < ha->max_qpairs; key++)
		if (ha->queue_pair_map[key])
			ha->queue_pair_map[key]->lun_cnt = 0;

	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
}

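/*
 * Pick a qpair for this command's LUN: reuse the cached lun_qpair_map
 * entry if one exists, otherwise bind the LUN to the least-loaded qpair
 * (by per-qpair LUN count) and cache that hint in the btree.
 */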
static void qlt_assign_qpair(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd)
{
	struct qla_qpair *qpair, *qp;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_qpair_hint *h;

	if (vha->flags.qpairs_available) {
		h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun);
		if (unlikely(!h)) {
			/* spread lun to qpair ratio evenly */
			int lcnt = 0, rc;
			struct scsi_qla_host *base_vha =
				pci_get_drvdata(vha->hw->pdev);

			qpair = vha->hw->base_qpair;
			if (qpair->lun_cnt == 0) {
				qpair->lun_cnt++;
				h = qla_qpair_to_hint(tgt, qpair);
				BUG_ON(!h);
				rc = btree_insert64(&tgt->lun_qpair_map,
					cmd->unpacked_lun, h, GFP_ATOMIC);
				if (rc) {
					qpair->lun_cnt--;
					ql_log(ql_log_info, vha, 0xd037,
					    "Unable to insert lun %llx into lun_qpair_map\n",
					    cmd->unpacked_lun);
				}
				goto out;
			} else {
				lcnt = qpair->lun_cnt;
			}

			h = NULL;
			list_for_each_entry(qp, &base_vha->qp_list,
			    qp_list_elem) {
				if (qp->lun_cnt == 0) {
					qp->lun_cnt++;
					h = qla_qpair_to_hint(tgt, qp);
					BUG_ON(!h);
					rc = btree_insert64(&tgt->lun_qpair_map,
					    cmd->unpacked_lun, h, GFP_ATOMIC);
					if (rc) {
						qp->lun_cnt--;
						ql_log(ql_log_info, vha, 0xd038,
							"Unable to insert lun %llx into lun_qpair_map\n",
							cmd->unpacked_lun);
					}
					qpair = qp;
					goto out;
				} else {
					if (qp->lun_cnt < lcnt) {
						lcnt = qp->lun_cnt;
						qpair = qp;
						continue;
					}
				}
			}
			BUG_ON(!qpair);
			qpair->lun_cnt++;
			h = qla_qpair_to_hint(tgt, qpair);
			BUG_ON(!h);
			rc = btree_insert64(&tgt->lun_qpair_map,
				cmd->unpacked_lun, h, GFP_ATOMIC);
			if (rc) {
				qpair->lun_cnt--;
				ql_log(ql_log_info, vha, 0xd039,
				   "Unable to insert lun %llx into lun_qpair_map\n",
				   cmd->unpacked_lun);
			}
		}
	} else {
		h = &tgt->qphints[0];
	}
out:
	cmd->qpair = h->qpair;
	cmd->se_cmd.cpuid = h->cpuid;
}

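/*
 * Allocate a qla_tgt_cmd from the session's tag pool and initialize it
 * from the incoming ATIO; returns NULL if the pool is exhausted.
 */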
static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
				       struct fc_port *sess,
				       struct atio_from_isp *atio)
{
	struct se_session *se_sess = sess->se_sess;
	struct qla_tgt_cmd *cmd;
	int tag;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
	if (tag < 0)
		return NULL;

	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
	memset(cmd, 0, sizeof(struct qla_tgt_cmd));
	cmd->cmd_type = TYPE_TGT_CMD;
	memcpy(&cmd->atio, atio, sizeof(*atio));
	cmd->state = QLA_TGT_STATE_NEW;
	cmd->tgt = vha->vha_tgt.qla_tgt;
	qlt_incr_num_pend_cmds(vha);
	cmd->vha = vha;
	cmd->se_cmd.map_tag = tag;
	cmd->sess = sess;
	cmd->loop_id = sess->loop_id;
	cmd->conf_compl_supported = sess->conf_compl_supported;

	cmd->trc_flags = 0;
	cmd->jiffies_at_alloc = get_jiffies_64();

	cmd->unpacked_lun = scsilun_to_int(
	    (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
	qlt_assign_qpair(vha, cmd);
	cmd->reset_count = vha->hw->base_qpair->chip_reset;

	return cmd;
}

static void qlt_create_sess_from_atio(struct work_struct *work)
{
	struct qla_tgt_sess_op *op = container_of(work,
					struct qla_tgt_sess_op, work);
	scsi_qla_host_t *vha = op->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	struct qla_tgt_cmd *cmd;
	unsigned long flags;
	uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_del(&op->cmd_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	if (op->aborted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
		    "sess_op with tag %u is aborted\n",
		    op->atio.u.isp24.exchange_addr);
		goto out_term;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
	    "qla_target(%d): Unable to find wwn login"
	    " (s_id %x:%x:%x), trying to create it manually\n",
	    vha->vp_idx, s_id[0], s_id[1], s_id[2]);

	if (op->atio.u.raw.entry_count > 1) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
		    "Dropping multi entry atio %p\n", &op->atio);
		goto out_term;
	}

	sess = qlt_make_local_sess(vha, s_id);
	/* sess has an extra creation ref. */

	if (!sess)
		goto out_term;
	/*
	 * Now obtain a pre-allocated session tag using the original op->atio
	 * packet header, and dispatch into __qlt_do_work() using the existing
	 * process context.
	 */
	cmd = qlt_get_tag(vha, sess, &op->atio);
	if (!cmd) {
		struct qla_qpair *qpair = ha->base_qpair;

		spin_lock_irqsave(qpair->qp_lock_ptr, flags);
		qlt_send_busy(qpair, &op->atio, SAM_STAT_BUSY);
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		ha->tgt.tgt_ops->put_sess(sess);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		kfree(op);
		return;
	}

	/*
	 * __qlt_do_work() will call qlt_put_sess() to release
	 * the extra reference taken above by qlt_make_local_sess()
	 */
	__qlt_do_work(cmd);
	kfree(op);
	return;
out_term:
	qlt_send_term_exchange(vha->hw->base_qpair, NULL, &op->atio, 0, 0);
	kfree(op);
}

/* ha->hardware_lock supposed to be held on entry */
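/*
 * Fast path for a new SCSI command ATIO: look up the session by s_id,
 * allocate a tagged command and queue qlt_do_work(); if no session exists
 * yet, defer to qlt_create_sess_from_atio() on the workqueue.
 */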
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess;
	struct qla_tgt_cmd *cmd;
	unsigned long flags;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_io, vha, 0x3061,
		    "New command while device %p is shutting down\n", tgt);
		return -EFAULT;
	}

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
	if (unlikely(!sess)) {
		struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op),
						     GFP_ATOMIC);
		if (!op)
			return -ENOMEM;

		memcpy(&op->atio, atio, sizeof(*atio));
		op->vha = vha;

		spin_lock_irqsave(&vha->cmd_list_lock, flags);
		list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

		INIT_WORK(&op->work, qlt_create_sess_from_atio);
		queue_work(qla_tgt_wq, &op->work);
		return 0;
	}

	/* Another WWN used to have our s_id. Our PLOGI scheduled its
	 * session deletion, but it's still in sess_del_work wq */
	if (sess->deleted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
		    "New command while old session %p is being deleted\n",
		    sess);
		return -EFAULT;
	}

	/*
	 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
	 */
	if (!kref_get_unless_zero(&sess->sess_kref)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
		    "%s: kref_get fail, %8phC oxid %x \n",
		    __func__, sess->port_name,
		     be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
		return -EFAULT;
	}

	cmd = qlt_get_tag(vha, sess, atio);
	if (!cmd) {
		ql_dbg(ql_dbg_io, vha, 0x3062,
		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		ha->tgt.tgt_ops->put_sess(sess);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4361 4362 4363
		return -ENOMEM;
	}

	cmd->cmd_in_wq = 1;
	cmd->trc_flags |= TRC_NEW_CMD;

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	INIT_WORK(&cmd->work, qlt_do_work);
	if (vha->flags.qpairs_available) {
		queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
	} else if (ha->msix_count) {
		if (cmd->atio.u.isp24.fcp_cmnd.rddata)
			queue_work_on(smp_processor_id(), qla_tgt_wq,
			    &cmd->work);
		else
			queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
			    &cmd->work);
	} else {
		queue_work(qla_tgt_wq, &cmd->work);
	}

	return 0;
}

/* ha->hardware_lock supposed to be held on entry */
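/*
 * Build a management command for the requested task management function
 * and hand it to TCM via tgt_ops->handle_tmr(); for a LUN RESET, pending
 * commands for that LUN are aborted first.
 */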
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags)
{
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	int res;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (!mcmd) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
		    "qla_target(%d): Allocation of management "
		    "command failed, some commands and their data could "
		    "leak\n", vha->vp_idx);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));
	mcmd->sess = sess;

	if (iocb) {
		memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
		    sizeof(mcmd->orig_iocb.imm_ntfy));
	}
	mcmd->tmr_func = fn;
	mcmd->flags = flags;
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->qpair = ha->base_qpair;

	switch (fn) {
	case QLA_TGT_LUN_RESET:
	    abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
	    break;
	}

	res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, mcmd->tmr_func, 0);
	if (res != 0) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
		    "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
		    sess->vha->vp_idx, res);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt;
	struct fc_port *sess;
	u64 unpacked_lun;
	int fn;
	unsigned long flags;

	tgt = vha->vha_tgt.qla_tgt;

	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    a->u.isp24.fcp_hdr.s_id);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);

	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
		    "qla_target(%d): task mgmt fn 0x%x for "
		    "non-existant session\n", vha->vp_idx, fn);
		return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
		    sizeof(struct atio_from_isp));
	}

	if (sess->deleted)
		return -EFAULT;

	return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
}

/* ha->hardware_lock supposed to be held on entry */
static int __qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb, struct fc_port *sess)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	u64 unpacked_lun;
	int rc;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
	    sizeof(mcmd->orig_iocb.imm_ntfy));

	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK;
	mcmd->qpair = ha->base_qpair;

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func,
	    le16_to_cpu(iocb->u.isp2x.seq_id));
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
		    vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	int loop_id;
	unsigned long flags;

	loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	if (sess == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
		    "qla_target(%d): task abort for unexisting "
		    "session\n", vha->vp_idx);
4534
		return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
4535 4536 4537 4538 4539 4540
		    QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
	}

	return __qlt_abort_task(vha, iocb, sess);
}

void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
{
	if (rc != MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
			"%s: se_sess %p / sess %p from"
			" port %8phC loop_id %#04x s_id %02x:%02x:%02x"
			" LOGO failed: %#x\n",
			__func__,
			fcport->se_sess,
			fcport,
			fcport->port_name, fcport->loop_id,
			fcport->d_id.b.domain, fcport->d_id.b.area,
			fcport->d_id.b.al_pa, rc);
	}

	fcport->logout_completed = 1;
}

/*
* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
*
* Schedules sessions with matching port_id/loop_id but different wwn for
* deletion. Returns existing session with matching wwn if present.
* Null otherwise.
*/
struct fc_port *
qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
    port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess)
{
	struct fc_port *sess = NULL, *other_sess;
	uint64_t other_wwn;

	*conflict_sess = NULL;

	list_for_each_entry(other_sess, &vha->vp_fcports, list) {

		other_wwn = wwn_to_u64(other_sess->port_name);

		if (wwn == other_wwn) {
			WARN_ON(sess);
			sess = other_sess;
			continue;
		}

		/* find other sess with nport_id collision */
		if (port_id.b24 == other_sess->d_id.b24) {
			if (loop_id != other_sess->loop_id) {
				ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000c,
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);

				/*
				 * logout_on_delete is set by default, but another
				 * session that has the same s_id/loop_id combo
				 * might have cleared it when requested this session
				 * deletion, so don't touch it
				 */
				qlt_schedule_sess_for_deletion(other_sess, true);
			} else {
				/*
				 * Another wwn used to have our s_id/loop_id
				 * kill the session, but don't free the loop_id
				 */
				ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01b,
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);


				other_sess->keep_nport_handle = 1;
				*conflict_sess = other_sess;
				qlt_schedule_sess_for_deletion(other_sess,
				    true);
			}
			continue;
		}

		/* find other sess with nport handle collision */
		if ((loop_id == other_sess->loop_id) &&
			(loop_id != FC_NO_LOOP_ID)) {
			ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000d,
			       "Invalidating sess %p loop_id %d wwn %llx.\n",
			       other_sess, other_sess->loop_id, other_wwn);

			/* Same loop_id but different s_id
			 * Ok to kill and logout */
			qlt_schedule_sess_for_deletion(other_sess, true);
		}
	}

	return sess;
}

/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
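/* The 24-bit S_ID (domain:area:al_pa) is packed into a u32 lookup key. */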
static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;
	uint32_t key;
	int count = 0;
	unsigned long flags;

	key = (((u32)s_id->b.domain << 16) |
	       ((u32)s_id->b.area   <<  8) |
	       ((u32)s_id->b.al_pa));

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
		uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);

		if (op_key == key) {
			op->aborted = true;
			count++;
		}
	}

	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
		uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
		if (op_key == key) {
			op->aborted = true;
			count++;
		}
	}

	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
		if (cmd_key == key) {
			cmd->aborted = 1;
			count++;
		}
	}
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	return count;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
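/*
 * Handle an ELS (PLOGI/PRLI/LOGO/PRLO/TPRLO/PDISC/ADISC) delivered as an
 * immediate notify. Returns 1 if the caller should send the NOTIFY ACK
 * now, 0 if it will be acked asynchronously.
 */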
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL, *conflict_sess = NULL;
	uint64_t wwn;
	port_id_t port_id;
	uint16_t loop_id;
	uint16_t wd3_lo;
	int res = 0;
	struct qlt_plogi_ack_t *pla;
	unsigned long flags;

	wwn = wwn_to_u64(iocb->u.isp24.port_name);

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area   = iocb->u.isp24.port_id[1];
	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	ql_dbg(ql_dbg_disc, vha, 0xf026,
	    "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n",
	    vha->vp_idx, iocb->u.isp24.port_id[2],
		iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
		   iocb->u.isp24.status_subcode, loop_id,
		iocb->u.isp24.port_name);

	/* res = 1 means ack at the end of thread
	 * res = 0 means ack async/later.
	 */
	switch (iocb->u.isp24.status_subcode) {
	case ELS_PLOGI:

		/* Mark all stale commands in qla_tgt_wq for deletion */
		abort_cmds_for_s_id(vha, &port_id);

		if (wwn) {
			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
			sess = qlt_find_sess_invalidate_other(vha, wwn,
				port_id, loop_id, &conflict_sess);
			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
		}

		if (IS_SW_RESV_ADDR(port_id)) {
			res = 1;
			break;
		}

		pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
		if (!pla) {
			qlt_send_term_imm_notif(vha, iocb, 1);
			break;
		}

		res = 0;

		if (conflict_sess) {
			conflict_sess->login_gen++;
			qlt_plogi_ack_link(vha, pla, conflict_sess,
				QLT_PLOGI_LINK_CONFLICT);
		}

		if (!sess) {
			pla->ref_count++;
			qla24xx_post_newsess_work(vha, &port_id,
				iocb->u.isp24.port_name, pla);
			res = 0;
			break;
		}

		qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
		sess->fw_login_state = DSC_LS_PLOGI_PEND;
		sess->d_id = port_id;
		sess->login_gen++;

		switch (sess->disc_state) {
		case DSC_DELETED:
			qlt_plogi_ack_unref(vha, pla);
			break;

		default:
			/*
			 * Under normal circumstances we want to release nport handle
			 * during LOGO process to avoid nport handle leaks inside FW.
			 * The exception is when LOGO is done while another PLOGI with
			 * the same nport handle is waiting as might be the case here.
			 * Note: there is always a possibility of a race where session
			 * deletion has already started for other reasons (e.g. ACL
			 * removal) and now PLOGI arrives:
			 * 1. if PLOGI arrived in FW after nport handle has been freed,
			 *    FW must have assigned this PLOGI a new/same handle and we
			 *    can proceed ACK'ing it as usual when session deletion
			 *    completes.
			 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
			 *    bit reached it, the handle has now been released. We'll
			 *    get an error when we ACK this PLOGI. Nothing will be sent
			 *    back to initiator. Initiator should eventually retry
			 *    PLOGI and situation will correct itself.
			 */
			sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
			   (sess->d_id.b24 == port_id.b24));

			ql_dbg(ql_dbg_disc, vha, 0x20f9,
			    "%s %d %8phC post del sess\n",
			    __func__, __LINE__, sess->port_name);


			qlt_schedule_sess_for_deletion_lock(sess);
			break;
		}

		break;

	case ELS_PRLI:
		wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);

		if (wwn) {
			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
			sess = qlt_find_sess_invalidate_other(vha, wwn, port_id,
				loop_id, &conflict_sess);
			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
		}

		if (conflict_sess) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
			    "PRLI with conflicting sess %p port %8phC\n",
			    conflict_sess, conflict_sess->port_name);
			qlt_send_term_imm_notif(vha, iocb, 1);
			res = 0;
			break;
		}

		if (sess != NULL) {
			if (sess->fw_login_state != DSC_LS_PLOGI_PEND &&
			    sess->fw_login_state != DSC_LS_PLOGI_COMP) {
				/*
				 * Impatient initiator sent PRLI before last
				 * PLOGI could finish. Will force him to re-try,
				 * while last one finishes.
				 */
				ql_log(ql_log_warn, sess->vha, 0xf095,
				    "sess %p PRLI received, before plogi ack.\n",
				    sess);
				qlt_send_term_imm_notif(vha, iocb, 1);
				res = 0;
				break;
			}

			/*
			 * This shouldn't happen under normal circumstances,
			 * since we have deleted the old session during PLOGI
			 */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
			    "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
			    sess->loop_id, sess, iocb->u.isp24.nport_handle);

			sess->local = 0;
			sess->loop_id = loop_id;
			sess->d_id = port_id;
			sess->fw_login_state = DSC_LS_PRLI_PEND;

			if (wd3_lo & BIT_7)
				sess->conf_compl_supported = 1;

			if ((wd3_lo & BIT_4) == 0)
				sess->port_type = FCT_INITIATOR;
			else
				sess->port_type = FCT_TARGET;
		}
		res = 1; /* send notify ack */

		/* Make session global (not used in fabric mode) */
		if (ha->current_topology != ISP_CFG_F) {
			if (sess) {
				ql_dbg(ql_dbg_disc, vha, 0x20fa,
				    "%s %d %8phC post nack\n",
				    __func__, __LINE__, sess->port_name);
				qla24xx_post_nack_work(vha, sess, iocb,
					SRB_NACK_PRLI);
				res = 0;
			} else {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else {
			if (sess) {
				ql_dbg(ql_dbg_disc, vha, 0x20fb,
				    "%s %d %8phC post nack\n",
				    __func__, __LINE__, sess->port_name);
				qla24xx_post_nack_work(vha, sess, iocb,
					SRB_NACK_PRLI);
				res = 0;
			}
		}
		break;

	case ELS_TPRLO:
		if (le16_to_cpu(iocb->u.isp24.flags) &
			NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
			loop_id = 0xFFFF;
			qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS);
			res = 1;
			break;
		}
		/* fall through */
	case ELS_LOGO:
	case ELS_PRLO:
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = qla2x00_find_fcport_by_loopid(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		if (sess) {
			sess->login_gen++;
			sess->fw_login_state = DSC_LS_LOGO_PEND;
			sess->logo_ack_needed = 1;
			memcpy(sess->iocb, iocb, IOCB_SIZE);
		}

		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);

		ql_dbg(ql_dbg_disc, vha, 0x20fc,
		    "%s: logo %llx res %d sess %p ",
		    __func__, wwn, res, sess);
		if (res == 0) {
			/*
			 * cmd went upper layer, look for qlt_xmit_tm_rsp()
			 * for LOGO_ACK & sess delete
			 */
			BUG_ON(!sess);
			res = 0;
		} else {
			/* cmd did not go to upper layer. */
			if (sess) {
				qlt_schedule_sess_for_deletion_lock(sess);
				res = 0;
			}
			/* else logo will be ack */
		}
		break;
	case ELS_PDISC:
	case ELS_ADISC:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(ha->base_qpair,
			    &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}

		sess = qla2x00_find_fcport_by_wwpn(vha,
		    iocb->u.isp24.port_name, 1);
		if (sess) {
			ql_dbg(ql_dbg_disc, vha, 0x20fd,
				"sess %p lid %d|%d DS %d LS %d\n",
				sess, sess->loop_id, loop_id,
				sess->disc_state, sess->fw_login_state);
		}

		res = 1; /* send notify ack */
		break;
	}

	case ELS_FLOGI:	/* should never happen */
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
		    "qla_target(%d): Unsupported ELS command %x "
		    "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
		break;
	}

	return res;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t add_flags = 0;
	int send_notify_ack = 1;
	uint16_t status;

	status = le16_to_cpu(iocb->u.isp2x.status);
	switch (status) {
	case IMM_NTFY_LIP_RESET:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
		    "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
		    vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_LIP_LINK_REINIT:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
		    "qla_target(%d): LINK REINIT (loop %#x, "
		    "subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(ha->base_qpair,
			    &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
		}
		memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
		tgt->link_reinit_iocb_pending = 1;
		/*
		 * QLogic requires to wait after LINK REINIT for possible
		 * PDISC or ADISC ELS commands
		 */
		send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_PORT_LOGOUT:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
		    "qla_target(%d): Port logout (loop "
		    "%#x, subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_TPRLO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
		    "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_PORT_CONFIG:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
		    "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
		    status);
		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_LOGO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
		    "qla_target(%d): Link failure detected\n",
		    vha->vp_idx);
		/* I_T nexus loss */
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_IOCB_OVERFLOW:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
		    "qla_target(%d): Cannot provide requested "
		    "capability (IOCB overflowed the immediate notify "
		    "resource count)\n", vha->vp_idx);
		break;

	case IMM_NTFY_ABORT_TASK:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
		    "qla_target(%d): Abort Task (S %08x I %#x -> "
		    "L %#x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp2x.seq_id),
		    GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
		    le16_to_cpu(iocb->u.isp2x.lun));
		if (qlt_abort_task(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_RESOURCE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
		    "qla_target(%d): Out of resources, host %ld\n",
		    vha->vp_idx, vha->host_no);
		break;

	case IMM_NTFY_MSG_RX:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
		    "qla_target(%d): Immediate notify task %x\n",
		    vha->vp_idx, iocb->u.isp2x.task_flags);
		if (qlt_handle_task_mgmt(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_ELS:
		if (qlt_24xx_handle_els(vha, iocb) == 0)
			send_notify_ack = 0;
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
		    "qla_target(%d): Received unknown immediate "
		    "notify status %x\n", vha->vp_idx, status);
		break;
	}

	if (send_notify_ack)
		qlt_send_notify_ack(ha->base_qpair, iocb, add_flags, 0, 0, 0,
		    0, 0);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 * This function sends busy to ISP 2xxx or 24xx.
 */
static int __qlt_send_busy(struct qla_qpair *qpair,
	struct atio_from_isp *atio, uint16_t status)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct fc_port *sess = NULL;
	unsigned long flags;
	u16 temp;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    atio->u.isp24.fcp_hdr.s_id);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	if (!sess) {
		qlt_send_term_exchange(qpair, NULL, atio, 1, 0);
		return 0;
	}
	/* Sending marker isn't necessary, since we're called from ISR */

	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_io, vha, 0x3063,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	vha->tgt_counters.num_q_full_sent++;
	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = sess->loop_id;
	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) |
		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
		CTIO7_FLAGS_DONT_RET_CTIO;
	ctio24->u.status1.flags = cpu_to_le16(temp);
	/*
	 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
	 * if the explicit confirmation is used.
	 */
	ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	ctio24->u.status1.scsi_status = cpu_to_le16(status);
	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	return 0;
}

/*
 * This routine is used to allocate a command for either a QFull condition
 * (ie reply SAM_STAT_BUSY) or to terminate an exchange that did not go
 * out previously.
 */
static void
qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	struct se_session *se_sess;
	struct qla_tgt_cmd *cmd;
	int tag;
	unsigned long flags;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_io, vha, 0x300a,
			"New command while device %p is shutting down\n", tgt);
		return;
	}

	if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
		vha->hw->tgt.num_qfull_cmds_dropped++;
		if (vha->hw->tgt.num_qfull_cmds_dropped >
5181 5182
			vha->qla_stats.stat_max_qfull_cmds_dropped)
			vha->qla_stats.stat_max_qfull_cmds_dropped =
5183 5184 5185 5186 5187 5188 5189 5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212
				vha->hw->tgt.num_qfull_cmds_dropped;

		ql_dbg(ql_dbg_io, vha, 0x3068,
			"qla_target(%d): %s: QFull CMD dropped[%d]\n",
			vha->vp_idx, __func__,
			vha->hw->tgt.num_qfull_cmds_dropped);

		qlt_chk_exch_leak_thresh_hold(vha);
		return;
	}

	sess = ha->tgt.tgt_ops->find_sess_by_s_id
		(vha, atio->u.isp24.fcp_hdr.s_id);
	if (!sess)
		return;

	se_sess = sess->se_sess;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
	if (tag < 0)
		return;

	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
	if (!cmd) {
		ql_dbg(ql_dbg_io, vha, 0x3009,
			"qla_target(%d): %s: Allocation of cmd failed\n",
			vha->vp_idx, __func__);

		vha->hw->tgt.num_qfull_cmds_dropped++;
		if (vha->hw->tgt.num_qfull_cmds_dropped >
5213 5214
			vha->qla_stats.stat_max_qfull_cmds_dropped)
			vha->qla_stats.stat_max_qfull_cmds_dropped =
5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228
				vha->hw->tgt.num_qfull_cmds_dropped;

		qlt_chk_exch_leak_thresh_hold(vha);
		return;
	}

	memset(cmd, 0, sizeof(struct qla_tgt_cmd));

	qlt_incr_num_pend_cmds(vha);
	INIT_LIST_HEAD(&cmd->cmd_list);
	memcpy(&cmd->atio, atio, sizeof(*atio));

	cmd->tgt = vha->vha_tgt.qla_tgt;
	cmd->vha = vha;
5229
	cmd->reset_count = ha->base_qpair->chip_reset;
5230
	cmd->q_full = 1;
5231
	cmd->qpair = ha->base_qpair;
5232 5233 5234 5235 5236 5237 5238 5239

	if (qfull) {
		cmd->q_full = 1;
		/* NOTE: borrowing the state field to carry the status */
		cmd->state = status;
	} else
		cmd->term_exchg = 1;

5240
	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5241 5242 5243 5244
	list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);

	vha->hw->tgt.num_qfull_cmds_alloc++;
	if (vha->hw->tgt.num_qfull_cmds_alloc >
5245 5246
		vha->qla_stats.stat_max_qfull_cmds_alloc)
		vha->qla_stats.stat_max_qfull_cmds_alloc =
5247
			vha->hw->tgt.num_qfull_cmds_alloc;
5248
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5249 5250 5251
}

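/*
 * Drain the q_full_list: re-attempt the BUSY or TERM EXCHANGE response for
 * each queued command, free the ones that went out, and splice anything
 * that still could not be sent back onto the list for a later retry.
 */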
int
qlt_free_qfull_cmds(struct qla_qpair *qpair)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct qla_tgt_cmd *cmd, *tcmd;
	struct list_head free_list, q_full_list;
	int rc = 0;

	if (list_empty(&ha->tgt.q_full_list))
		return 0;

	INIT_LIST_HEAD(&free_list);
	INIT_LIST_HEAD(&q_full_list);

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	if (list_empty(&ha->tgt.q_full_list)) {
		spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
		return 0;
	}

	list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list);
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) {
		if (cmd->q_full)
			/* cmd->state is a borrowed field to hold status */
			rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state);
		else if (cmd->term_exchg)
			rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio);

		if (rc == -ENOMEM)
			break;

		if (cmd->q_full)
			ql_dbg(ql_dbg_io, vha, 0x3006,
			    "%s: busy sent for ox_id[%04x]\n", __func__,
			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
		else if (cmd->term_exchg)
			ql_dbg(ql_dbg_io, vha, 0x3007,
			    "%s: Term exchg sent for ox_id[%04x]\n", __func__,
			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
		else
			ql_dbg(ql_dbg_io, vha, 0x3008,
			    "%s: Unexpected cmd in QFull list %p\n", __func__,
			    cmd);

		list_del(&cmd->cmd_list);
		list_add_tail(&cmd->cmd_list, &free_list);

		/* piggy back on hardware_lock for protection */
		vha->hw->tgt.num_qfull_cmds_alloc--;
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	cmd = NULL;

	list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
		list_del(&cmd->cmd_list);
		/* This cmd was never sent to TCM.  There is no need
		 * to schedule free or call free_cmd
		 */
		qlt_free_cmd(cmd);
	}

	if (!list_empty(&q_full_list)) {
		spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
		list_splice(&q_full_list, &vha->hw->tgt.q_full_list);
		spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
	}

	return rc;
}

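/*
 * Send a BUSY response for the given ATIO; if no IOCB space is available,
 * queue a qfull command so the response can be retried later.
 */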
static void
qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio,
    uint16_t status)
{
	int rc = 0;
	struct scsi_qla_host *vha = qpair->vha;

	rc = __qlt_send_busy(qpair, atio, status);
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, atio, status, 1);
}

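/*
 * Returns 1 and sends a BUSY response if the number of pending commands
 * has reached Q_FULL_THRESH_HOLD; returns 0 otherwise.
 */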
static int
qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t status;
	unsigned long flags;

	if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
		return 0;

	if (!ha_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);
	status = temp_sam_status;
	qlt_send_busy(qpair, atio, status);
	if (!ha_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return 1;
}

/* ha->hardware_lock is held on entry only when ha_locked is set */
/* called via callback from qla2xxx */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int rc;
	unsigned long flags;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0x3064,
		    "ATIO pkt, but no tgt (ha %p)", ha);
		return;
	}
	/*
	 * In tgt_stop mode we should also allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	tgt->atio_irq_cmd_count++;

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
		if (unlikely(atio->u.isp24.exchange_addr ==
		    ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
			ql_dbg(ql_dbg_io, vha, 0x3065,
			    "qla_target(%d): ATIO_TYPE7 "
			    "received with UNKNOWN exchange address, "
			    "sending QUEUE_FULL\n", vha->vp_idx);
			if (!ha_locked)
				spin_lock_irqsave(&ha->hardware_lock, flags);
			qlt_send_busy(ha->base_qpair, atio,
			    SAM_STAT_TASK_SET_FULL);
			if (!ha_locked)
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
			break;
		}

		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
			rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair,
			    atio, ha_locked);
			if (rc != 0) {
				tgt->atio_irq_cmd_count--;
				return;
			}
			rc = qlt_handle_cmd_for_atio(vha, atio);
		} else {
			rc = qlt_handle_task_mgmt(vha, atio);
		}
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
				if (!ha_locked)
					spin_lock_irqsave(&ha->hardware_lock,
					    flags);

#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(ha->base_qpair, atio,
				    SAM_STAT_BUSY);
#else
				qlt_send_term_exchange(ha->base_qpair, NULL,
				    atio, 1, 0);
#endif
				if (!ha_locked)
					spin_unlock_irqrestore(
					    &ha->hardware_lock, flags);
			} else {
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe059,
					    "qla_target: Unable to send "
					    "command to target for req, "
					    "ignoring.\n");
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe05a,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status.\n", vha->vp_idx);
					if (!ha_locked)
						spin_lock_irqsave(
						    &ha->hardware_lock, flags);
					qlt_send_busy(ha->base_qpair,
					    atio, SAM_STAT_BUSY);
					if (!ha_locked)
						spin_unlock_irqrestore(
						    &ha->hardware_lock, flags);
				}
			}
		}
		break;

	case IMMED_NOTIFY_TYPE:
	{
		if (unlikely(atio->u.isp2x.entry_status != 0)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05b,
			    "qla_target(%d): Received ATIO packet %x "
			    "with error status %x\n", vha->vp_idx,
			    atio->u.raw.entry_type,
			    atio->u.isp2x.entry_status);
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");

		if (!ha_locked)
			spin_lock_irqsave(&ha->hardware_lock, flags);
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe05c,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	tgt->atio_irq_cmd_count--;
}

/* ha->hardware_lock is supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_response_pkt(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
		    "qla_target(%d): Response pkt %x received, but no "
		    "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
		return;
	}

	/*
	 * In tgt_stop mode we should also allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	switch (pkt->entry_type) {
	case CTIO_CRC2:
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		qlt_do_ctio_completion(vha, rsp, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case ACCEPT_TGT_IO_TYPE:
	{
		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
		int rc;
		if (atio->u.isp2x.status !=
		    cpu_to_le16(ATIO_CDB_VALID)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
			    "qla_target(%d): ATIO with error "
			    "status %x received\n", vha->vp_idx,
			    le16_to_cpu(atio->u.isp2x.status));
			break;
		}

		rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1);
		if (rc != 0)
			return;

		rc = qlt_handle_cmd_for_atio(vha, atio);
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(rsp->qpair, atio, 0);
#else
				qlt_send_term_exchange(rsp->qpair, NULL, atio, 1, 0);
#endif
			} else {
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe05f,
					    "qla_target: Unable to send "
					    "command to target, sending TERM "
					    "EXCHANGE for rsp\n");
					qlt_send_term_exchange(rsp->qpair, NULL,
					    atio, 1, 0);
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe060,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status\n", vha->vp_idx);
					qlt_send_busy(rsp->qpair, atio, 0);
				}
			}
		}
	}
	break;

	case CONTINUE_TGT_IO_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		qlt_do_ctio_completion(vha, rsp, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case CTIO_A64_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		qlt_do_ctio_completion(vha, rsp, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case IMMED_NOTIFY_TYPE:
		ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
		break;

	case NOTIFY_ACK_TYPE:
		if (tgt->notify_ack_expected > 0) {
			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe036,
			    "NOTIFY_ACK seq %08x status %x\n",
			    le16_to_cpu(entry->u.isp2x.seq_id),
			    le16_to_cpu(entry->u.isp2x.status));
			tgt->notify_ack_expected--;
			if (entry->u.isp2x.status !=
			    cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe061,
				    "qla_target(%d): NOTIFY_ACK "
				    "failed %x\n", vha->vp_idx,
				    le16_to_cpu(entry->u.isp2x.status));
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe062,
			    "qla_target(%d): Unexpected NOTIFY_ACK received\n",
			    vha->vp_idx);
		}
		break;

	case ABTS_RECV_24XX:
		ql_dbg(ql_dbg_tgt, vha, 0xe037,
		    "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
		break;

	case ABTS_RESP_24XX:
		if (tgt->abts_resp_expected > 0) {
			struct abts_resp_from_24xx_fw *entry =
				(struct abts_resp_from_24xx_fw *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe038,
			    "ABTS_RESP_24XX: compl_status %x\n",
			    entry->compl_status);
			tgt->abts_resp_expected--;
			if (le16_to_cpu(entry->compl_status) !=
			    ABTS_RESP_COMPL_SUCCESS) {
				if ((entry->error_subcode1 == 0x1E) &&
				    (entry->error_subcode2 == 0)) {
					/*
					 * We've got a race here: the aborted
					 * exchange was not terminated, i.e.
					 * the response for the aborted command
					 * was sent between the abort request
					 * being received and processed.
					 * Unfortunately, the firmware has a
					 * silly requirement that all aborted
					 * exchanges must be explicitly
					 * terminated, otherwise it refuses to
					 * send responses for the abort
					 * requests. So, we have to
					 * (re)terminate the exchange and retry
					 * the abort response.
					 */
					qlt_24xx_retry_term_exchange(vha,
					    entry);
				} else
					ql_dbg(ql_dbg_tgt, vha, 0xe063,
					    "qla_target(%d): ABTS_RESP_24XX "
					    "failed %x (subcode %x:%x)",
					    vha->vp_idx, entry->compl_status,
					    entry->error_subcode1,
					    entry->error_subcode2);
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe064,
			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
			    "received\n", vha->vp_idx);
		}
		break;

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe065,
		    "qla_target(%d): Received unknown response pkt "
		    "type %x\n", vha->vp_idx, pkt->entry_type);
		break;
	}

}

/*
 * ha->hardware_lock is supposed to be held on entry. It might be dropped
 * and then reacquired.
 */
void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
	uint16_t *mailbox)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int login_code;

	if (!tgt || tgt->tgt_stop || tgt->tgt_stopped)
		return;

	if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
	    IS_QLA2100(ha))
		return;
	/*
	 * In tgt_stop mode we should also allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	switch (code) {
	case MBA_RESET:			/* Reset */
	case MBA_SYSTEM_ERR:		/* System Error */
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
		    "qla_target(%d): System error async event %#x "
		    "occurred", vha->vp_idx, code);
		break;
	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up. */
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LOOP_UP:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
		    "qla_target(%d): Async LOOP_UP occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(ha->base_qpair,
			    (void *)&tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		break;
	}

	case MBA_LIP_OCCURRED:
	case MBA_LOOP_DOWN:
	case MBA_LIP_RESET:
	case MBA_RSCN_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
		    "qla_target(%d): Async event %#x occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		break;

	case MBA_REJECTED_FCP_CMD:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017,
		    "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)",
		    vha->vp_idx,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));

		if (le16_to_cpu(mailbox[3]) == 1) {
			/* exchange starvation. */
			vha->hw->exch_starvation++;
			if (vha->hw->exch_starvation > 5) {
				ql_log(ql_log_warn, vha, 0xd03a,
				    "Exchange starvation. Resetting RISC\n");

				vha->hw->exch_starvation = 0;
				if (IS_P3P_TYPE(vha->hw))
					set_bit(FCOE_CTX_RESET_NEEDED,
					    &vha->dpc_flags);
				else
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		}
		break;

	case MBA_PORT_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
		    "qla_target(%d): Port update async event %#x "
		    "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
		    "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));

		login_code = le16_to_cpu(mailbox[2]);
		if (login_code == 0x4) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
			    "Async MB 2: Got PLOGI Complete\n");
			vha->hw->exch_starvation = 0;
		} else if (login_code == 0x7)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
			    "Async MB 2: Port Logged Out\n");
		break;
	default:
		break;
	}

}

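/*
 * Retrieve the port database entry for @loop_id from the firmware and
 * either merge it into an existing fc_port (matched by WWPN) or add it
 * to vha->vp_fcports as a newly discovered port.
 */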
static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
	uint16_t loop_id)
{
	fc_port_t *fcport, *tfcp, *del;
	int rc;
	unsigned long flags;
	u8 newfcport = 0;

	fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
	if (!fcport) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
		    "qla_target(%d): Allocation of tmp FC port failed",
		    vha->vp_idx);
		return NULL;
	}

	fcport->loop_id = loop_id;

	rc = qla24xx_gpdb_wait(vha, fcport, 0);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
		    "qla_target(%d): Failed to retrieve fcport "
		    "information -- get_port_database() returned %x "
		    "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
		kfree(fcport);
		return NULL;
	}

	del = NULL;
	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1);

	if (tfcp) {
		tfcp->d_id = fcport->d_id;
		tfcp->port_type = fcport->port_type;
		tfcp->supported_classes = fcport->supported_classes;
		tfcp->flags |= fcport->flags;

		del = fcport;
		fcport = tfcp;
	} else {
		if (vha->hw->current_topology == ISP_CFG_F)
			fcport->flags |= FCF_FABRIC_DEVICE;

		list_add_tail(&fcport->list, &vha->vp_fcports);
		if (!IS_SW_RESV_ADDR(fcport->d_id))
			vha->fcport_count++;
		fcport->login_gen++;
		fcport->disc_state = DSC_LOGIN_COMPLETE;
		fcport->login_succ = 1;
		newfcport = 1;
	}

	fcport->deleted = 0;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	switch (vha->host->active_mode) {
	case MODE_INITIATOR:
	case MODE_DUAL:
		if (newfcport) {
			if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
				ql_dbg(ql_dbg_disc, vha, 0x20fe,
				   "%s %d %8phC post upd_fcport fcp_cnt %d\n",
				   __func__, __LINE__, fcport->port_name, vha->fcport_count);
				qla24xx_post_upd_fcport_work(vha, fcport);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x20ff,
				   "%s %d %8phC post gpsc fcp_cnt %d\n",
				   __func__, __LINE__, fcport->port_name, vha->fcport_count);
				qla24xx_post_gpsc_work(vha, fcport);
			}
		}
		break;

	case MODE_TARGET:
	default:
		break;
	}
	if (del)
		qla2x00_free_fcport(del);

	return fcport;
}

/* Takes and releases vha_tgt.tgt_mutex itself; do not call with it held */
static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha,
	uint8_t *s_id)
{
	struct fc_port *sess = NULL;
	fc_port_t *fcport = NULL;
	int rc, global_resets;
	uint16_t loop_id = 0;

	if ((s_id[0] == 0xFF) && (s_id[1] == 0xFC)) {
		/*
		 * This is Domain Controller, so it should be
		 * OK to drop SCSI commands from it.
		 */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
		    "Unable to find initiator with S_ID %x:%x:%x",
		    s_id[0], s_id[1], s_id[2]);
		return NULL;
	}

	mutex_lock(&vha->vha_tgt.tgt_mutex);

retry:
	global_resets =
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);

	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
	if (rc != 0) {
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		ql_log(ql_log_info, vha, 0xf071,
		    "qla_target(%d): Unable to find "
		    "initiator with S_ID %x:%x:%x",
		    vha->vp_idx, s_id[0], s_id[1],
		    s_id[2]);

		if (rc == -ENOENT) {
			qlt_port_logo_t logo;
			sid_to_portid(s_id, &logo.id);
			logo.cmd_count = 1;
			qlt_send_first_logo(vha, &logo);
		}

		return NULL;
	}

	fcport = qlt_get_port_database(vha, loop_id);
	if (!fcport) {
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		return NULL;
	}

	if (global_resets !=
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
		    "qla_target(%d): global reset during session discovery "
		    "(counter was %d, new %d), retrying", vha->vp_idx,
		    global_resets,
		    atomic_read(&vha->vha_tgt.
			qla_tgt->tgt_global_resets_count));
		goto retry;
	}

	sess = qlt_create_sess(vha, fcport, true);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	return sess;
}

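/*
 * Deferred ABTS handler: look up (or create) the session for the
 * initiator S_ID and process the abort; any failure is answered with
 * an FCP_TMF_REJECTED ABTS response.
 */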
static void qlt_abort_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	unsigned long flags = 0, flags2 = 0;
	uint8_t s_id[3];
	int rc;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags2);

	if (tgt->tgt_stop)
		goto out_term2;

	s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
	s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
	s_id[2] = prm->abts.fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */

		spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
		if (!sess)
			goto out_term2;
	} else {
		if (sess->deleted) {
			sess = NULL;
			goto out_term2;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c,
			    "%s: kref_get fail %8phC\n",
			     __func__, sess->port_name);
			sess = NULL;
			goto out_term2;
		}
	}

	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

	if (rc != 0)
		goto out_term;
	return;

out_term2:
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

out_term:
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts,
	    FCP_TMF_REJECTED, false);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

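/*
 * Deferred task management handler: look up (or create) the session and
 * issue the TMF; on failure the exchange is terminated.
 */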
static void qlt_tmr_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct atio_from_isp *a = &prm->tm_iocb2;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	unsigned long flags;
	uint8_t *s_id = NULL; /* to hide compiler warnings */
	int rc;
	u64 unpacked_lun;
	int fn;
	void *iocb;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);

	if (tgt->tgt_stop)
		goto out_term2;

	s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (!sess)
			goto out_term2;
	} else {
		if (sess->deleted) {
			sess = NULL;
			goto out_term2;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020,
			    "%s: kref_get fail %8phC\n",
			     __func__, sess->port_name);
			sess = NULL;
			goto out_term2;
		}
	}

	iocb = a;
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);

	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	if (rc != 0)
		goto out_term;
	return;

out_term2:
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
out_term:
	qlt_send_term_exchange(ha->base_qpair, NULL, &prm->tm_iocb2, 1, 0);
}

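/*
 * Workqueue function draining tgt->sess_works_list; each entry is
 * dispatched to qlt_abort_work() or qlt_tmr_work() and then freed.
 */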
static void qlt_sess_work_fn(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
	struct scsi_qla_host *vha = tgt->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		struct qla_tgt_sess_work_param *prm = list_entry(
		    tgt->sess_works_list.next, typeof(*prm),
		    sess_works_list_entry);

		/*
		 * This work can be scheduled on several CPUs at a time, so we
		 * must delete the entry to eliminate double processing
		 */
		list_del(&prm->sess_works_list_entry);

		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

		switch (prm->type) {
		case QLA_TGT_SESS_WORK_ABORT:
			qlt_abort_work(tgt, prm);
			break;
		case QLA_TGT_SESS_WORK_TM:
			qlt_tmr_work(tgt, prm);
			break;
		default:
			BUG_ON(1);
			break;
		}

		spin_lock_irqsave(&tgt->sess_work_lock, flags);

		kfree(prm);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
}

/* Must be called under tgt_host_action_mutex */
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
	struct qla_tgt *tgt;
	int rc, i;
	struct qla_qpair_hint *h;

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	if (!IS_TGT_MODE_CAPABLE(ha)) {
		ql_log(ql_log_warn, base_vha, 0xe070,
		    "This adapter does not support target mode.\n");
		return 0;
	}

	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
	    "Registering target for host %ld(%p).\n", base_vha->host_no, ha);

	BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);

	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
	if (!tgt) {
		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
		    "Unable to allocate struct qla_tgt\n");
		return -ENOMEM;
	}

	tgt->qphints = kzalloc((ha->max_qpairs + 1) *
	    sizeof(struct qla_qpair_hint), GFP_KERNEL);
	if (!tgt->qphints) {
		kfree(tgt);
		ql_log(ql_log_warn, base_vha, 0x0197,
		    "Unable to allocate qpair hints.\n");
		return -ENOMEM;
	}

	if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
		base_vha->host->hostt->supported_mode |= MODE_TARGET;

	rc = btree_init64(&tgt->lun_qpair_map);
	if (rc) {
		kfree(tgt->qphints);
		kfree(tgt);
		ql_log(ql_log_info, base_vha, 0x0198,
			"Unable to initialize lun_qpair_map btree\n");
		return -EIO;
	}
	h = &tgt->qphints[0];
	h->qpair = ha->base_qpair;
	INIT_LIST_HEAD(&h->hint_elem);
	h->cpuid = ha->base_qpair->cpuid;
	list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list);

	for (i = 0; i < ha->max_qpairs; i++) {
		unsigned long flags;
		struct qla_qpair *qpair = ha->queue_pair_map[i];

		h = &tgt->qphints[i + 1];
		INIT_LIST_HEAD(&h->hint_elem);
		if (qpair) {
			h->qpair = qpair;
			spin_lock_irqsave(qpair->qp_lock_ptr, flags);
			list_add_tail(&h->hint_elem, &qpair->hints_list);
			spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
			h->cpuid = qpair->cpuid;
		}
	}

	tgt->ha = ha;
	tgt->vha = base_vha;
	init_waitqueue_head(&tgt->waitQ);
	INIT_LIST_HEAD(&tgt->del_sess_list);
	spin_lock_init(&tgt->sess_work_lock);
	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
	INIT_LIST_HEAD(&tgt->sess_works_list);
	atomic_set(&tgt->tgt_global_resets_count, 0);

	base_vha->vha_tgt.qla_tgt = tgt;

	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
		"qla_target(%d): using 64 Bit PCI addressing",
		base_vha->vp_idx);
	tgt->tgt_enable_64bit_addr = 1;
	/* 3 is reserved */
	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
	tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
	tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;

	mutex_lock(&qla_tgt_mutex);
	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
	mutex_unlock(&qla_tgt_mutex);

	if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
		ha->tgt.tgt_ops->add_target(base_vha);

	return 0;
}

/* Must be called under tgt_host_action_mutex */
int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
{
	if (!vha->vha_tgt.qla_tgt)
		return 0;

	if (vha->fc_vport) {
		qlt_release(vha->vha_tgt.qla_tgt);
		return 0;
	}

	/* free left over qfull cmds */
	qlt_init_term_exchange(vha);

	mutex_lock(&qla_tgt_mutex);
	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
	    vha->host_no, ha);
	qlt_release(vha->vha_tgt.qla_tgt);

	return 0;
}

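/*
 * Empty and destroy the btree mapping port IDs to scsi_qla_host
 * instances once target mode is being torn down.
 */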
void qlt_remove_target_resources(struct qla_hw_data *ha)
{
	struct scsi_qla_host *node;
	u32 key = 0;

	btree_for_each_safe32(&ha->tgt.host_map, key, node)
		btree_remove32(&ha->tgt.host_map, key);

	btree_destroy32(&ha->tgt.host_map);
}

static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
	unsigned char *b)
{
	int i;

	pr_debug("qla2xxx HW vha->node_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->node_name[i]);
	pr_debug("\n");
	pr_debug("qla2xxx HW vha->port_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->port_name[i]);
	pr_debug("\n");

	pr_debug("qla2xxx passed configfs WWPN: ");
	put_unaligned_be64(wwpn, b);
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", b[i]);
	pr_debug("\n");
}

/**
 * qla_tgt_lport_register - register lport with external module
 *
 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
 * @phys_wwpn: physical port WWPN of the passed FC target
 * @npiv_wwpn: NPIV WWPN of the passed FC target
 * @npiv_wwnn: NPIV WWNN of the passed FC target
 * @callback: lport initialization callback for tcm_qla2xxx code
 */
int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
		       u64 npiv_wwpn, u64 npiv_wwnn,
		       int (*callback)(struct scsi_qla_host *, void *, u64, u64))
{
	struct qla_tgt *tgt;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct Scsi_Host *host;
	unsigned long flags;
	int rc;
	u8 b[WWN_SIZE];

	mutex_lock(&qla_tgt_mutex);
	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
		vha = tgt->vha;
		ha = vha->hw;

		host = vha->host;
		if (!host)
			continue;

		if (!(host->hostt->supported_mode & MODE_TARGET))
			continue;

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		if (tgt->tgt_stop) {
			pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
				 host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		if (!scsi_host_get(host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe068,
			    "Unable to scsi_host_get() for"
			    " qla2xxx scsi_host\n");
			continue;
		}
		qlt_lport_dump(vha, phys_wwpn, b);

		if (memcmp(vha->port_name, b, WWN_SIZE)) {
			scsi_host_put(host);
			continue;
		}
		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
		if (rc != 0)
			scsi_host_put(host);

		mutex_unlock(&qla_tgt_mutex);
		return rc;
	}
	mutex_unlock(&qla_tgt_mutex);

	return -ENODEV;
}
EXPORT_SYMBOL(qlt_lport_register);

/**
 * qla_tgt_lport_deregister - Deregister lport
 *
 * @vha:  Registered scsi_qla_host pointer
 */
void qlt_lport_deregister(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct Scsi_Host *sh = vha->host;
	/*
	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
	 */
	vha->vha_tgt.target_lport_ptr = NULL;
	ha->tgt.tgt_ops = NULL;
	/*
	 * Release the Scsi_Host reference for the underlying qla2xxx host
	 */
	scsi_host_put(sh);
}
EXPORT_SYMBOL(qlt_lport_deregister);

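/*
 * qlt_set_mode()/qlt_clear_mode() translate the configured qlini_mode
 * into the Scsi_Host active_mode when target mode is enabled or disabled.
 */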
/* Must be called under HW lock */
static void qlt_set_mode(struct scsi_qla_host *vha)
{
	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_TARGET;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode = MODE_UNKNOWN;
		break;
	case QLA2XXX_INI_MODE_DUAL:
		vha->host->active_mode = MODE_DUAL;
		break;
	default:
		break;
	}
}

/* Must be called under HW lock */
static void qlt_clear_mode(struct scsi_qla_host *vha)
{
	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
		vha->host->active_mode = MODE_UNKNOWN;
		break;
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
	case QLA2XXX_INI_MODE_DUAL:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	default:
		break;
	}
}

/*
 * qla_tgt_enable_vha - NO LOCK HELD
 *
 * host_reset, bring up w/ Target Mode Enabled
 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe069,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stopped = 0;
	qlt_set_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (vha->vp_idx) {
		qla24xx_disable_vp(vha);
		qla24xx_enable_vp(vha);
	} else {
		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
		qla2xxx_wake_dpc(base_vha);
		qla2x00_wait_for_hba_online(base_vha);
	}
}
EXPORT_SYMBOL(qlt_enable_vha);

/*
 * qla_tgt_disable_vha - NO LOCK HELD
 *
 * Disable Target Mode and reset the adapter
 */
static void qlt_disable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_clear_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_hba_online(vha);
}

/*
 * Called from qla_init.c:qla24xx_vport_create() context to setup
 * the target mode specific struct scsi_qla_host and struct qla_hw_data
 * members.
 */
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
	vha->vha_tgt.qla_tgt = NULL;

	mutex_init(&vha->vha_tgt.tgt_mutex);
	mutex_init(&vha->vha_tgt.tgt_host_action_mutex);

	qlt_clear_mode(vha);

	/*
	 * NOTE: Currently the value is kept the same for <24xx and
	 * >=24xx ISPs. If it is necessary to change it,
	 * the check should be added for specific ISPs,
	 * assigning the value appropriately.
	 */
	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;

	qlt_add_target(ha, vha);
}

void
qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
{
	/*
	 * FC-4 Feature bit 0 indicates target functionality to the name server.
	 */
	if (qla_tgt_mode_enabled(vha)) {
		ct_req->req.rff_id.fc4_feature = BIT_0;
	} else if (qla_ini_mode_enabled(vha)) {
		ct_req->req.rff_id.fc4_feature = BIT_1;
	} else if (qla_dual_mode_enabled(vha))
		ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
}

/*
 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
 * @vha: SCSI driver HA context
 *
 * Beginning of ATIO ring has initialization control block already built
 * by nvram config routine.
 */
void
qlt_init_atio_q_entries(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t cnt;
	struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;

	if (qla_ini_mode_enabled(vha))
		return;

	for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
		pkt->u.raw.signature = ATIO_PROCESSED;
		pkt++;
	}

}

/*
 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
 * @vha: SCSI driver HA context
 * @ha_locked: set when ha->hardware_lock is already held by the caller
 */
void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *pkt;
	int cnt, i;

	if (!ha->flags.fw_started)
		return;

	while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
	    fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		cnt = pkt->u.raw.entry_count;

		if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
			/*
			 * This packet is corrupted. The header + payload
			 * can not be trusted. There is no point in passing
			 * it further up.
			 */
			ql_log(ql_log_warn, vha, 0xd03c,
			    "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
			    pkt->u.isp24.fcp_hdr.s_id,
			    be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
			    le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);

			adjust_corrupted_atio(pkt);
			qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
			    ha_locked, 0);
		} else {
			qlt_24xx_atio_pkt_all_vps(vha,
			    (struct atio_from_isp *)pkt, ha_locked);
		}

		for (i = 0; i < cnt; i++) {
			ha->tgt.atio_ring_index++;
			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
				ha->tgt.atio_ring_index = 0;
				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
			} else
				ha->tgt.atio_ring_ptr++;

			pkt->u.raw.signature = ATIO_PROCESSED;
			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		}
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
	RD_REG_DWORD_RELAXED(ISP_ATIO_Q_OUT(vha));
}

void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
	RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));

	if (IS_ATIO_MSIX_CAPABLE(ha)) {
		struct qla_msix_entry *msix = &ha->msix_entries[2];
		struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;

		icb->msix_atio = cpu_to_le16(msix->entry);
		ql_dbg(ql_dbg_init, vha, 0xf072,
		    "Registering ICB vector 0x%x for atio que.\n",
		    msix->entry);
	}
}

void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		if (qla_tgt_mode_enabled(vha))
			nv->exchange_count = cpu_to_le16(0xFFFF);
		else			/* dual */
			nv->exchange_count = cpu_to_le16(ql2xexchoffld);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (qla_tgt_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		if (ql2xtgt_tape_enable)
			/* Enable FC Tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC Tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);

		/*
		 * clear BIT 15 explicitly as we have seen at least
		 * a couple of instances where this was set and this
		 * was causing the firmware to not be initialized.
		 */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	if (ha->base_qpair->enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}

void
qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_24xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
	}

	/* disable ZIO at start time. */
	if (!vha->flags.init_done) {
		uint32_t tmp;
		tmp = le32_to_cpu(icb->firmware_options_2);
		tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		icb->firmware_options_2 = cpu_to_le32(tmp);
	}
}

void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		if (qla_tgt_mode_enabled(vha))
			nv->exchange_count = cpu_to_le16(0xFFFF);
		else			/* dual */
			nv->exchange_count = cpu_to_le16(ql2xexchoffld);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (qla_tgt_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);
		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		/*
		 * clear BIT 15 explicitly as we have seen at
		 * least a couple of instances where this was set
		 * and this was causing the firmware to not be
		 * initialized.
		 */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
		if (ql2xtgt_tape_enable)
			/* Enable FC tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	if (ha->base_qpair->enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}

void
qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_81xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
	}

	/* disable ZIO at start time. */
	if (!vha->flags.init_done) {
		uint32_t tmp;
		tmp = le32_to_cpu(icb->firmware_options_2);
		tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		icb->firmware_options_2 = cpu_to_le32(tmp);
	}
}

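/* Reserve one extra MSI-X vector for the ATIO queue. */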
void
qlt_83xx_iospace_config(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	ha->msix_count += 1; /* For ATIO Q */
}

void
qlt_modify_vp_config(struct scsi_qla_host *vha,
	struct vp_config_entry_24xx *vpmod)
{
	/* enable target mode.  Bit5 = 1 => disable */
	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_5;

	/* Disable ini mode, if requested.  bit4 = 1 => disable */
	if (qla_tgt_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_4;
}

void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
	int rc;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
	} else {
		ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
	}

	mutex_init(&base_vha->vha_tgt.tgt_mutex);
	mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);

	INIT_LIST_HEAD(&base_vha->unknown_atio_list);
	INIT_DELAYED_WORK(&base_vha->unknown_atio_work,
	    qlt_unknown_atio_work_fn);

	qlt_clear_mode(base_vha);

	rc = btree_init32(&ha->tgt.host_map);
	if (rc)
		ql_log(ql_log_info, base_vha, 0xd03d,
		    "Unable to initialize ha->host_map btree\n");

	qlt_update_vp_map(base_vha, SET_VP_IDX);
}

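/*
 * MSI-X handler for the dedicated ATIO queue vector; serialized by
 * ha->tgt.atio_lock instead of the hardware lock.
 */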
irqreturn_t
qla83xx_msix_atio_q(int irq, void *dev_id)
{
	struct rsp_que *rsp;
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	ha = rsp->hw;
	vha = pci_get_drvdata(ha->pdev);

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);

	qlt_24xx_process_atio_queue(vha, 0);

	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);

	return IRQ_HANDLED;
}

static void
qlt_handle_abts_recv_work(struct work_struct *work)
{
	struct qla_tgt_sess_op *op = container_of(work,
		struct qla_tgt_sess_op, work);
	scsi_qla_host_t *vha = op->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	if (qla2x00_reset_active(vha) ||
	    (op->chip_reset != ha->base_qpair->chip_reset))
		return;

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);
	qlt_24xx_process_atio_queue(vha, 0);
	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	kfree(op);
}

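/*
 * Defer ABTS response processing to a workqueue so the ATIO queue can be
 * drained first; falls back to direct processing if the work item cannot
 * be allocated.
 */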
void
qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp,
    response_t *pkt)
{
	struct qla_tgt_sess_op *op;

	op = kzalloc(sizeof(*op), GFP_ATOMIC);

	if (!op) {
		/* do not reach for ATIO queue here.  This is best effort err
		 * recovery at this point.
		 */
		qlt_response_pkt_all_vps(vha, rsp, pkt);
		return;
	}

	memcpy(&op->atio, pkt, sizeof(*pkt));
	op->vha = vha;
	op->chip_reset = vha->hw->base_qpair->chip_reset;
	op->rsp = rsp;
	INIT_WORK(&op->work, qlt_handle_abts_recv_work);
	queue_work(qla_tgt_wq, &op->work);
}

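/*
 * Allocate the per-HA target-mode resources: the vp map array and the
 * DMA-coherent ATIO ring (atio_q_length + 1 entries).
 */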
int
qlt_mem_alloc(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
	    MAX_MULTI_ID_FABRIC, GFP_KERNEL);
	if (!ha->tgt.tgt_vp_map)
		return -ENOMEM;

	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
	    &ha->tgt.atio_dma, GFP_KERNEL);
	if (!ha->tgt.atio_ring) {
		kfree(ha->tgt.tgt_vp_map);
		return -ENOMEM;
	}
	return 0;
}

void
qlt_mem_free(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.atio_ring) {
		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
		    ha->tgt.atio_dma);
	}
	kfree(ha->tgt.tgt_vp_map);
}

/* vport_slock to be held by the caller */
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
	void *slot;
	u32 key;
	int rc;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	key = vha->d_id.b24;

	switch (cmd) {
	case SET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
		slot = btree_lookup32(&vha->hw->tgt.host_map, key);
		if (!slot) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018,
			    "Save vha in host_map %p %06x\n", vha, key);
			rc = btree_insert32(&vha->hw->tgt.host_map,
				key, vha, GFP_ATOMIC);
			if (rc)
				ql_log(ql_log_info, vha, 0xd03e,
				    "Unable to insert s_id into host_map: %06x\n",
				    key);
			return;
		}
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
		    "replace existing vha in host_map %p %06x\n", vha, key);
		btree_update32(&vha->hw->tgt.host_map, key, vha);
		break;
	case RESET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
		   "clear vha in host_map %p %06x\n", vha, key);
		slot = btree_lookup32(&vha->hw->tgt.host_map, key);
		if (slot)
			btree_remove32(&vha->hw->tgt.host_map, key);
		vha->d_id.b24 = 0;
		break;
	}
}

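/*
 * Update the d_id-to-vha mapping when the port is (re)assigned an FC
 * address; a stale entry for the old address is removed first.
 */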
void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
{
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->d_id.b24) {
		spin_lock_irqsave(&ha->vport_slock, flags);
		vha->d_id = id;
		qlt_update_vp_map(vha, SET_AL_PA);
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	} else if (vha->d_id.b24 != id.b24) {
		spin_lock_irqsave(&ha->vport_slock, flags);
		qlt_update_vp_map(vha, RESET_AL_PA);
		vha->d_id = id;
		qlt_update_vp_map(vha, SET_AL_PA);
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	}
}

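/*
 * Map the qlini_mode module parameter string onto ql2x_ini_mode;
 * returns false for an unrecognized value.
 */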
static int __init qlt_parse_ini_mode(void)
{
	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL;
	else
		return false;

	return true;
}

int __init qlt_init(void)
{
	int ret;

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
	    qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xd04b,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep",
	    sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t),
	    0, NULL);

	if (!qla_tgt_plogi_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_plogi_cachep failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_plogi_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_plogi_cachep:
	kmem_cache_destroy(qla_tgt_plogi_cachep);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	return ret;
}

void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_plogi_cachep);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
}