/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"
#include "qla_target.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

/* Stop the per-vport watchdog timer, if this is a vport and it is running. */
void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
	if (!vha->vp_idx || !vha->timer_active)
		return;

	del_timer_sync(&vha->timer);
	vha->timer_active = 0;
}

A
Adrian Bunk 已提交
29
static uint32_t
30 31 32
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
33
	struct qla_hw_data *ha = vha->hw;
34
	unsigned long flags;
35 36

	/* Find an empty slot and assign an vp_id */
37
	mutex_lock(&ha->vport_lock);
38 39
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
40 41 42
		ql_dbg(ql_dbg_vport, vha, 0xa000,
		    "vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports);
43
		mutex_unlock(&ha->vport_lock);
44 45 46
		return vp_id;
	}

47
	set_bit(vp_id, ha->vp_idx_map);
48 49
	ha->num_vhosts++;
	vha->vp_idx = vp_id;
50 51

	spin_lock_irqsave(&ha->vport_slock, flags);
52
	list_add_tail(&vha->list, &ha->vp_list);
53
	spin_unlock_irqrestore(&ha->vport_slock, flags);
54

55
	spin_lock_irqsave(&ha->hardware_lock, flags);
56
	qlt_update_vp_map(vha, SET_VP_IDX);
57
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
58

59
	mutex_unlock(&ha->vport_lock);
60 61 62 63 64 65 66
	return vp_id;
}

/*
 * Release a vport's NPIV index: wait for in-flight references to drain,
 * unhook the vport from vp_list and the target-mode map, then return the
 * index to the free pool.
 */
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long iflags = 0;
	uint16_t vp_id;

	mutex_lock(&ha->vport_lock);
	/*
	 * Wait for all pending activities to finish before removing vport from
	 * the list.
	 * Lock needs to be held for safe removal from the list (it
	 * ensures no active vp_list traversal while the vport is removed
	 * from the queue)
	 */
	wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
	    10*HZ);

	spin_lock_irqsave(&ha->vport_slock, iflags);
	if (atomic_read(&vha->vref_count)) {
		/* Timed out with references still held; force the count. */
		ql_dbg(ql_dbg_vport, vha, 0xfffa,
		    "vha->vref_count=%u timeout\n", vha->vref_count.counter);
		vha->vref_count = (atomic_t)ATOMIC_INIT(0);
	}
	list_del(&vha->list);
	qlt_update_vp_map(vha, RESET_VP_IDX);
	spin_unlock_irqrestore(&ha->vport_slock, iflags);

	vp_id = vha->vp_idx;
	ha->num_vhosts--;
	clear_bit(vp_id, ha->vp_idx_map);

	mutex_unlock(&ha->vport_lock);
}

A
Adrian Bunk 已提交
98
static scsi_qla_host_t *
99
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
100 101
{
	scsi_qla_host_t *vha;
102
	struct scsi_qla_host *tvha;
103
	unsigned long flags;
104

105
	spin_lock_irqsave(&ha->vport_slock, flags);
106
	/* Locate matching device in database. */
107
	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
108 109
		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);
110
			return vha;
111
		}
112
	}
113
	spin_unlock_irqrestore(&ha->vport_slock, flags);
114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129
	return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fcport = port structure pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	/*
	 * !!! NOTE !!!
	 * This function, if called in contexts other than vp create, disable
	 * or delete, please make sure this is synchronized with the
	 * delete thread.
	 */
	fc_port_t *fcport;

	/* Walk every fcport attached to this vport. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		ql_dbg(ql_dbg_vport, vha, 0xa001,
		    "Marking port dead, loop_id=0x%04x : %x.\n",
		    fcport->loop_id, fcport->vha->vp_idx);

		/* Drop the session, then leave the port unconfigured. */
		qla2x00_mark_device_lost(vha, fcport, 0, 0);
		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
	}
}

/*
 * Take a virtual port offline: ask the firmware to disable it (logging out
 * all sessions), mark its devices lost/dead, drop it from the target map
 * and report the final state through the FC transport.
 *
 * Returns 0 on success, -1 if the firmware disable request failed.
 */
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
	fc_port_t *fcport;
	unsigned long flags;
	int ret = QLA_SUCCESS;

	if (vha->hw->flags.fw_started)
		ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	list_for_each_entry(fcport, &vha->vp_fcports, list)
		fcport->logout_on_delete = 0;

	qla2x00_mark_all_devices_lost(vha, 0);

	/* Remove port id from vp target map */
	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	qlt_update_vp_map(vha, RESET_AL_PA);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	qla2x00_mark_vp_devices_dead(vha);
	atomic_set(&vha->vp_state, VP_FAILED);
	vha->flags.management_server_logged_in = 0;

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		return -1;
	}
	fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
	return 0;
}

/*
 * Bring a virtual port online: verify the physical link is usable in
 * fabric topology, then push the vport configuration to the firmware.
 *
 * Returns 0 on success, 1 on failure.
 */
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	int ret;

	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
	    !(ha->current_topology & ISP_CFG_F)) {
		vha->vp_err_state = VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		ql_dbg(ql_dbg_taskm, vha, 0x800b,
		    "%s skip enable. loop_state %x topo %x\n",
		    __func__, base_vha->loop_state.counter,
		    ha->current_topology);
		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801a,
	    "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
	return 0;

enable_failed:
	ql_dbg(ql_dbg_taskm, vha, 0x801b,
	    "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
	return 1;
}

226
static void
227 228 229 230 231 232 233
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
	struct fc_vport *fc_vport;
	int ret;

	fc_vport = vha->fc_vport;

234 235
	ql_dbg(ql_dbg_vport, vha, 0xa002,
	    "%s: change request #3.\n", __func__);
236 237
	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
	if (ret != QLA_SUCCESS) {
238 239
		ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
		    "receiving of RSCN requests: 0x%x.\n", ret);
240 241 242 243 244 245 246 247 248 249 250 251 252 253 254
		return;
	} else {
		/* Corresponds to SCR enabled */
		clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
	}

	vha->flags.online = 1;
	if (qla24xx_configure_vhba(vha))
		return;

	atomic_set(&vha->vp_state, VP_ACTIVE);
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

void
255
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
256
{
257
	scsi_qla_host_t *vha;
258
	struct qla_hw_data *ha = rsp->hw;
259
	int i = 0;
260
	unsigned long flags;
261

262 263
	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vha, &ha->vp_list, list) {
264
		if (vha->vp_idx) {
265 266 267
			atomic_inc(&vha->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

268 269 270 271 272 273 274
			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_LOOP_UP:
			case MBA_LOOP_DOWN:
			case MBA_LIP_RESET:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
275 276 277
				ql_dbg(ql_dbg_async, vha, 0x5024,
				    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
				    i, *mb, vha);
278
				qla2x00_async_event(vha, rsp, mb);
279
				break;
280 281 282 283 284 285 286 287 288
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				if ((mb[3] & 0xff) == vha->vp_idx) {
					ql_dbg(ql_dbg_async, vha, 0x5024,
					    "Async_event for VP[%d], mb=0x%x vha=%p\n",
					    i, *mb, vha);
					qla2x00_async_event(vha, rsp, mb);
				}
				break;
289
			}
290 291 292

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vha->vref_count);
293
			wake_up(&vha->vref_waitq);
294
		}
295
		i++;
296
	}
297
	spin_unlock_irqrestore(&ha->vport_slock, flags);
298 299
}

300
int
301 302 303 304 305 306 307 308 309 310 311 312 313 314
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	/*
	 * Physical port will do most of the abort and recovery work. We can
	 * just treat it as a loop down
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

315 316 317 318 319
	/*
	 * To exclusively reset vport, we need to log it out first.  Note: this
	 * control_vp can fail if ISP reset is already issued, this is
	 * expected, as the vp would be already logged out due to ISP reset.
	 */
320 321 322
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

323 324
	ql_dbg(ql_dbg_taskm, vha, 0x801d,
	    "Scheduling enable of Vport %d.\n", vha->vp_idx);
325
	return qla24xx_enable_vp(vha);
326 327
}

A
Adrian Bunk 已提交
328
static int
329 330
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
331 332 333
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

334 335
	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
	    "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);
336

337 338 339 340 341 342 343 344 345 346 347
	/* Check if Fw is ready to configure VP first */
	if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
		if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
			/* VP acquired. complete port configuration */
			ql_dbg(ql_dbg_dpc, vha, 0x4014,
			    "Configure VP scheduled.\n");
			qla24xx_configure_vp(vha);
			ql_dbg(ql_dbg_dpc, vha, 0x4015,
			    "Configure VP end.\n");
			return 0;
		}
348 349
	}

350
	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
351 352
		ql_dbg(ql_dbg_dpc, vha, 0x4016,
		    "FCPort update scheduled.\n");
353 354
		qla2x00_update_fcports(vha);
		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
355 356
		ql_dbg(ql_dbg_dpc, vha, 0x4017,
		    "FCPort update end.\n");
357 358
	}

359 360 361 362 363 364 365 366 367 368 369
	if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
	    atomic_read(&vha->loop_state) != LOOP_DOWN) {

		if (!vha->relogin_jif ||
		    time_after_eq(jiffies, vha->relogin_jif)) {
			vha->relogin_jif = jiffies + HZ;
			clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);

			ql_dbg(ql_dbg_dpc, vha, 0x4018,
			    "Relogin needed scheduled.\n");
370
			qla24xx_post_relogin_work(vha);
371
		}
372
	}
373 374 375 376 377 378

	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

379
	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
380
		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
381 382
			ql_dbg(ql_dbg_dpc, vha, 0x401a,
			    "Loop resync scheduled.\n");
383 384
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
385 386
			ql_dbg(ql_dbg_dpc, vha, 0x401b,
			    "Loop resync end.\n");
387 388 389
		}
	}

390
	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
391
	    "Exiting %s.\n", __func__);
392 393 394 395
	return 0;
}

void
396
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
397
{
398 399
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
400
	unsigned long flags = 0;
401

402
	if (vha->vp_idx)
403 404 405 406
		return;
	if (list_empty(&ha->vp_list))
		return;

407
	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);
408

409 410 411
	if (!(ha->current_topology & ISP_CFG_F))
		return;

412 413 414 415 416 417
	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		if (vp->vp_idx) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

418
			qla2x00_do_dpc_vp(vp);
419 420 421 422

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
423
	}
424
	spin_unlock_irqrestore(&ha->vport_slock, flags);
425 426 427 428 429
}

int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
430 431
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447
	scsi_qla_host_t *vha;
	uint8_t port_name[WWN_SIZE];

	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
		return VPCERR_UNSUPPORTED;

	/* Check up the F/W and H/W support NPIV */
	if (!ha->flags.npiv_supported)
		return VPCERR_UNSUPPORTED;

	/* Check up whether npiv supported switch presented */
	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
		return VPCERR_NO_FABRIC_SUPP;

	/* Check up unique WWPN */
	u64_to_wwn(fc_vport->port_name, port_name);
448
	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
449
		return VPCERR_BAD_WWN;
450 451 452 453 454 455
	vha = qla24xx_find_vhost_by_name(ha, port_name);
	if (vha)
		return VPCERR_BAD_WWN;

	/* Check up max-npiv-supports */
	if (ha->num_vhosts > ha->max_npiv_vports) {
456 457 458 459
		ql_dbg(ql_dbg_vport, vha, 0xa004,
		    "num_vhosts %ud is bigger "
		    "than max_npiv_vports %ud.\n",
		    ha->num_vhosts, ha->max_npiv_vports);
460 461 462 463 464 465 466 467
		return VPCERR_UNSUPPORTED;
	}
	return 0;
}

scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
468 469
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
470
	scsi_qla_host_t *vha;
471
	struct scsi_host_template *sht = &qla2xxx_driver_template;
472 473
	struct Scsi_Host *host;

474 475
	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
476 477
		ql_log(ql_log_warn, vha, 0xa005,
		    "scsi_host_alloc() failed for vport.\n");
478 479 480
		return(NULL);
	}

481
	host = vha->host;
482 483 484 485 486 487 488 489 490
	fc_vport->dd_data = vha;
	/* New host info */
	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	if (vha->vp_idx > ha->max_npiv_vports) {
491 492
		ql_dbg(ql_dbg_vport, vha, 0xa006,
		    "Couldn't allocate vp_id.\n");
493
		goto create_vhost_failed;
494
	}
495
	vha->mgmt_svr_loop_id = NPH_MGMT_SERVER;
496 497 498 499 500 501 502 503 504 505 506

	vha->dpc_flags = 0L;

	/*
	 * To fix the issue of processing a parent's RSCN for the vport before
	 * its SCR is complete.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

507
	qla2x00_start_timer(vha, WATCH_INTERVAL);
508

509 510
	vha->req = base_vha->req;
	host->can_queue = base_vha->req->length + 128;
511
	host->cmd_per_lun = 3;
512
	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
513 514 515
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
516
	host->max_channel = MAX_BUSES - 1;
517
	host->max_lun = ql2xmaxlun;
518
	host->unique_id = host->host_no;
519
	host->max_id = ha->max_fibre_devices;
520 521
	host->transportt = qla2xxx_transport_vport_template;

522 523 524
	ql_dbg(ql_dbg_vport, vha, 0xa007,
	    "Detect vport hba %ld at address = %p.\n",
	    vha->host_no, vha);
525 526 527

	vha->flags.init_done = 1;

528 529 530 531 532
	mutex_lock(&ha->vport_lock);
	set_bit(vha->vp_idx, ha->vp_idx_map);
	ha->cur_vport_count++;
	mutex_unlock(&ha->vport_lock);

533 534
	return vha;

535
create_vhost_failed:
536 537
	return NULL;
}
538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554

static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = req->id;

	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
		sizeof(request_t), req->ring, req->dma);
	req->ring = NULL;
	req->dma = 0;
	if (que_id) {
		ha->req_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
555
	kfree(req->outstanding_cmds);
556 557 558 559 560 561 562 563 564 565 566
	kfree(req);
	req = NULL;
}

/*
 * Tear down an additional response queue: release its MSI-X vector,
 * free its DMA ring, release its queue id and free the structure.
 * (The old dead "rsp = NULL;" store on the local pointer is dropped.)
 */
static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = rsp->id;

	if (rsp->msix && rsp->msix->have_irq) {
		free_irq(rsp->msix->vector, rsp->msix->handle);
		rsp->msix->have_irq = 0;
		rsp->msix->in_use = 0;
		rsp->msix->handle = NULL;
	}
	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
		sizeof(response_t), rsp->ring, rsp->dma);
	rsp->ring = NULL;
	rsp->dma = 0;
	if (que_id) {
		/* Queue 0 is the base queue and is never unmapped here. */
		ha->rsp_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(rsp);
}

int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
589
	int ret = QLA_SUCCESS;
590

591
	if (req && vha->flags.qpairs_req_created) {
592
		req->options |= BIT_0;
593
		ret = qla25xx_init_req_que(vha, req);
594 595
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
596 597

		qla25xx_free_req_que(vha, req);
598 599 600 601 602
	}

	return ret;
}

603
int
604 605
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
606
	int ret = QLA_SUCCESS;
607

608
	if (rsp && vha->flags.qpairs_rsp_created) {
609
		rsp->options |= BIT_0;
610
		ret = qla25xx_init_rsp_que(vha, rsp);
611 612
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
613 614

		qla25xx_free_rsp_que(vha, rsp);
615 616 617 618 619 620 621
	}

	return ret;
}

/* Delete all queues for a given vhost */
int
622
qla25xx_delete_queues(struct scsi_qla_host *vha)
623 624 625 626 627
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;
628
	struct qla_qpair *qpair, *tqpair;
629

630
	if (ql2xmqsupport || ql2xnvmeenable) {
631 632 633 634 635 636 637 638 639 640 641 642 643 644 645
		list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
		    qp_list_elem)
			qla2xxx_delete_qpair(vha, qpair);
	} else {
		/* Delete request queues */
		for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
			req = ha->req_q_map[cnt];
			if (req && test_bit(cnt, ha->req_qid_map)) {
				ret = qla25xx_delete_req_que(vha, req);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00ea,
					    "Couldn't delete req que %d.\n",
					    req->id);
					return ret;
				}
646 647
			}
		}
648

649 650 651 652 653 654 655 656 657 658 659
		/* Delete response queues */
		for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
			rsp = ha->rsp_q_map[cnt];
			if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
				ret = qla25xx_delete_rsp_que(vha, rsp);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00eb,
					    "Couldn't delete rsp que %d.\n",
					    rsp->id);
					return ret;
				}
660 661 662
			}
		}
	}
663

664 665 666 667 668
	return ret;
}

/*
 * Create an additional (multiqueue) request queue: allocate the ring and
 * outstanding-command table, claim a queue id, wire up the per-queue
 * registers, and optionally ask the firmware to start the queue.
 *
 * Returns the new queue id on success, 0 on failure.
 */
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos, bool startqp)
{
	int ret = 0;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;
	uint32_t cnt;

	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (req == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00d9,
		    "Failed to allocate memory for request queue.\n");
		goto failed;
	}

	/* DMA ring: length + 1 slot for the firmware's shadow entry. */
	req->length = REQUEST_ENTRY_CNT_24XX;
	req->ring = dma_alloc_coherent(&ha->pdev->dev,
			(req->length + 1) * sizeof(request_t),
			&req->dma, GFP_KERNEL);
	if (req->ring == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00da,
		    "Failed to allocate memory for request_ring.\n");
		goto que_failed;
	}

	ret = qla2x00_alloc_outstanding_cmds(ha, req);
	if (ret != QLA_SUCCESS)
		goto que_failed;

	/* Claim a free queue id under mq_lock. */
	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
	if (que_id >= ha->max_req_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00db,
		    "No resources to create additional request queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->req_qid_map);
	ha->req_q_map[que_id] = req;
	req->rid = rid;
	req->vp_idx = vp_idx;
	req->qos = qos;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	ql_dbg(ql_dbg_init, base_vha, 0x00dc,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	/* rsp_que < 0 means no response queue is paired with this one. */
	if (rsp_que < 0)
		req->rsp = NULL;
	else
		req->rsp = ha->rsp_q_map[rsp_que];
	/* Use alternate PCI bus number */
	if (MSB(req->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(req->rid))
		options |= BIT_5;
	req->options = options;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
	    "options=0x%x.\n", req->options);
	ql_dbg(ql_dbg_init, base_vha, 0x00dd,
	    "options=0x%x.\n", req->options);
	/* Slot 0 is reserved; handles start at 1. */
	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
		req->outstanding_cmds[cnt] = NULL;
	req->current_outstanding_cmd = 1;

	req->ring_ptr = req->ring;
	req->ring_index = 0;
	req->cnt = req->length;
	req->id = que_id;
	/* Per-queue in/out registers live in the isp25mq register block. */
	reg = ISP_QUE_REG(ha, que_id);
	req->req_q_in = &reg->isp25mq.req_q_in;
	req->req_q_out = &reg->isp25mq.req_q_out;
	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
	/* Shadow out-pointer occupies the extra slot past the ring. */
	req->out_ptr = (void *)(req->ring + req->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index,
	    req->cnt, req->id, req->max_q_depth);
	ql_dbg(ql_dbg_init, base_vha, 0x00de,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index, req->cnt,
	    req->id, req->max_q_depth);

	if (startqp) {
		ret = qla25xx_init_req_que(base_vha, req);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00df,
			    "%s failed.\n", __func__);
			/* Release the claimed id; free path clears the map. */
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->req_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_req_created = 1;
	}

	return req->id;

que_failed:
	qla25xx_free_req_que(base_vha, req);
failed:
	return 0;
}

782 783
static void qla_do_work(struct work_struct *work)
{
784
	unsigned long flags;
785
	struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
786
	struct scsi_qla_host *vha;
787
	struct qla_hw_data *ha = qpair->hw;
788

789
	spin_lock_irqsave(&qpair->qp_lock, flags);
790
	vha = pci_get_drvdata(ha->pdev);
791 792
	qla24xx_process_response_queue(vha, qpair->rsp);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
793

794 795
}

/*
 * Create an additional (multiqueue) response queue for a queue pair:
 * allocate the ring, claim a queue id, wire up the per-queue registers,
 * request the MSI-X interrupt, and optionally ask the firmware to start
 * the queue.
 *
 * Returns the new queue id on success, 0 on failure.
 */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair, bool startqp)
{
	int ret = 0;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;

	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (rsp == NULL) {
		ql_log(ql_log_warn, base_vha, 0x0066,
		    "Failed to allocate memory for response queue.\n");
		goto failed;
	}

	/* DMA ring: length + 1 slot for the firmware's shadow entry. */
	rsp->length = RESPONSE_ENTRY_CNT_MQ;
	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
			(rsp->length + 1) * sizeof(response_t),
			&rsp->dma, GFP_KERNEL);
	if (rsp->ring == NULL) {
		ql_log(ql_log_warn, base_vha, 0x00e1,
		    "Failed to allocate memory for response ring.\n");
		goto que_failed;
	}

	/* Claim a free queue id under mq_lock. */
	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
	if (que_id >= ha->max_rsp_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00e2,
		    "No resources to create additional request queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->rsp_qid_map);

	/* Interrupt vector comes from the owning queue pair. */
	rsp->msix = qpair->msix;

	ha->rsp_q_map[que_id] = rsp;
	rsp->rid = rid;
	rsp->vp_idx = vp_idx;
	rsp->hw = ha;
	ql_dbg(ql_dbg_init, base_vha, 0x00e4,
	    "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
	    que_id, rsp->rid, rsp->vp_idx, rsp->hw);
	/* Use alternate PCI bus number */
	if (MSB(rsp->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(rsp->rid))
		options |= BIT_5;
	/* Enable MSIX handshake mode on for uncapable adapters */
	if (!IS_MSIX_NACK_CAPABLE(ha))
		options |= BIT_6;

	/* Set option to indicate response queue creation */
	options |= BIT_1;

	rsp->options = options;
	rsp->id = que_id;
	/* Per-queue in/out registers live in the isp25mq register block. */
	reg = ISP_QUE_REG(ha, que_id);
	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
	/* Shadow in-pointer occupies the extra slot past the ring. */
	rsp->in_ptr = (void *)(rsp->ring + rsp->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);
	ql_dbg(ql_dbg_init, base_vha, 0x00e5,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);

	ret = qla25xx_request_irq(ha, qpair, qpair->msix,
	    QLA_MSIX_QPAIR_MULTIQ_RSP_Q);
	if (ret)
		goto que_failed;

	if (startqp) {
		ret = qla25xx_init_rsp_que(base_vha, rsp);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00e7,
			    "%s failed.\n", __func__);
			/* Release the claimed id; free path clears the map. */
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->rsp_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_rsp_created = 1;
	}
	rsp->req = NULL;

	qla2x00_init_response_q_entries(rsp);
	if (qpair->hw->wq)
		INIT_WORK(&qpair->q_work, qla_do_work);
	return rsp->id;

que_failed:
	qla25xx_free_rsp_que(base_vha, rsp);
failed:
	return 0;
}
902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939

static void qla_ctrlvp_sp_done(void *s, int res)
{
	struct srb *sp = s;

	complete(&sp->comp);
	/* don't free sp here. Let the caller do the free */
}

/**
 * qla24xx_control_vp() - Enable a virtual port for given host
 * @vha:	adapter block pointer
 * @cmd:	command type to be sent for enable virtual port
 *
 * Return:	qla2xxx local function return status code.
 */
int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
{
	int rval = QLA_MEMORY_ALLOC_FAILED;
	struct qla_hw_data *ha = vha->hw;
	int	vp_index = vha->vp_idx;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	srb_t *sp;

	ql_dbg(ql_dbg_vport, vha, 0x10c1,
	    "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index);

	/* Only real vports (index 1 .. max-1) may be controlled. */
	if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
		return QLA_PARAMETER_ERROR;

	/* The control IOCB is issued on the base port on the vport's behalf. */
	sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_CTRL_VP;
	sp->name = "ctrl_vp";
	sp->done = qla_ctrlvp_sp_done;
	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
	sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
	sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_async, vha, 0xffff,
		    "%s: %s Failed submission. %x.\n",
		    __func__, sp->name, rval);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
	    sp->name, sp->handle);

	/* Block until the completion callback (qla_ctrlvp_sp_done) fires. */
	wait_for_completion(&sp->comp);
	rval = sp->rc;
	switch (rval) {
	case QLA_FUNCTION_TIMEOUT:
		/*
		 * NOTE(review): on timeout sp is deliberately not freed here
		 * (break falls through to "done") — presumably the timeout
		 * path reclaims it; confirm before changing.
		 */
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n",
		    __func__, sp->name, rval);
		break;
	case QLA_SUCCESS:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
		    __func__, sp->name);
		goto done_free_sp;
	default:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
		    __func__, sp->name, rval);
		goto done_free_sp;
	}
done:
	return rval;

done_free_sp:
	sp->free(sp);
	return rval;
}