/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"
#include "qla_target.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
	if (vha->vp_idx && vha->timer_active) {
		del_timer_sync(&vha->timer);
		vha->timer_active = 0;
	}
}

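/*
 * Allocate a free vp_idx from ha->vp_idx_map, account for the new vhost and
 * link the vport onto the adapter's vp_list. Returns the allocated vp_id, or
 * a value greater than ha->max_npiv_vports if no slot is available.
 */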
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Find an empty slot and assign a vp_id */
	mutex_lock(&ha->vport_lock);
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa000,
		    "vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports);
		mutex_unlock(&ha->vport_lock);
		return vp_id;
	}

	set_bit(vp_id, ha->vp_idx_map);
	ha->num_vhosts++;
	vha->vp_idx = vp_id;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_add_tail(&vha->list, &ha->vp_list);

	qlt_update_vp_map(vha, SET_VP_IDX);

	spin_unlock_irqrestore(&ha->vport_slock, flags);

	mutex_unlock(&ha->vport_lock);
	return vp_id;
}

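/*
 * Release a vport's vp_idx: wait for outstanding references to drain, unlink
 * the vport from ha->vp_list and clear its bit in ha->vp_idx_map.
 */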
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	uint16_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	mutex_lock(&ha->vport_lock);
	/*
	 * Wait for all pending activities to finish before removing vport from
	 * the list.
	 * Lock needs to be held for safe removal from the list (it
	 * ensures no active vp_list traversal while the vport is removed
	 * from the queue)
	 */
	wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
	    10*HZ);

	spin_lock_irqsave(&ha->vport_slock, flags);
	if (atomic_read(&vha->vref_count)) {
		ql_dbg(ql_dbg_vport, vha, 0xfffa,
		    "vha->vref_count=%u timeout\n", vha->vref_count.counter);
		vha->vref_count = (atomic_t)ATOMIC_INIT(0);
	}
	list_del(&vha->list);
	qlt_update_vp_map(vha, RESET_VP_IDX);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	vp_id = vha->vp_idx;
	ha->num_vhosts--;
	clear_bit(vp_id, ha->vp_idx_map);

	mutex_unlock(&ha->vport_lock);
}

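/* Locate a vport on this adapter by WWPN; returns NULL if none matches. */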
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
	scsi_qla_host_t *vha;
	struct scsi_qla_host *tvha;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Locate matching device in database. */
	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);
			return vha;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
	return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	vha = virtual adapter block pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	/*
	 * !!! NOTE !!!
	 * If this function is called in contexts other than vp create,
	 * disable or delete, make sure it is synchronized with the
	 * delete thread.
	 */
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		ql_dbg(ql_dbg_vport, vha, 0xa001,
		    "Marking port dead, loop_id=0x%04x : %x.\n",
		    fcport->loop_id, fcport->vha->vp_idx);

		qla2x00_mark_device_lost(vha, fcport, 0, 0);
		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
	}
}

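/*
 * Disable a virtual port: log out all of its sessions via the firmware,
 * take the vport's loop down and mark its fcports as lost.
 */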
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
	unsigned long flags;
	int ret;

	ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	/* Remove port id from vp target map */
	spin_lock_irqsave(&vha->hw->vport_slock, flags);
	qlt_update_vp_map(vha, RESET_AL_PA);
	spin_unlock_irqrestore(&vha->hw->vport_slock, flags);

	qla2x00_mark_vp_devices_dead(vha);
	atomic_set(&vha->vp_state, VP_FAILED);
	vha->flags.management_server_logged_in = 0;
	if (ret == QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
	} else {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		return -1;
	}
	return 0;
}

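/*
 * Enable a virtual port. The physical port must be up on a fabric (F-port)
 * topology; the vport configuration is then (re)sent to the firmware.
 * Returns 0 on success, 1 on failure.
 */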
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN  ||
		atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
		!(ha->current_topology & ISP_CFG_F)) {
		vha->vp_err_state =  VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		ql_dbg(ql_dbg_taskm, vha, 0x800b,
		    "%s skip enable. loop_state %x topo %x\n",
		    __func__, base_vha->loop_state.counter,
		    ha->current_topology);

		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801a,
	    "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
	return 0;

enable_failed:
	ql_dbg(ql_dbg_taskm, vha, 0x801b,
	    "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
	return 1;
}

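/*
 * Complete vport configuration once the VP index has been acquired:
 * enable RSCN reception (SCR) and bring the vport online.
 */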
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
	struct fc_vport *fc_vport;
	int ret;

	fc_vport = vha->fc_vport;

	ql_dbg(ql_dbg_vport, vha, 0xa002,
	    "%s: change request #3.\n", __func__);
	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
	if (ret != QLA_SUCCESS) {
		ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
		    "receiving of RSCN requests: 0x%x.\n", ret);
		return;
	} else {
		/* Corresponds to SCR enabled */
		clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
	}

	vha->flags.online = 1;
	if (qla24xx_configure_vhba(vha))
		return;

	atomic_set(&vha->vp_state, VP_ACTIVE);
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

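/* Forward relevant asynchronous events to every configured vport. */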
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha = rsp->hw;
	int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vha, &ha->vp_list, list) {
		if (vha->vp_idx) {
			atomic_inc(&vha->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_LOOP_UP:
			case MBA_LOOP_DOWN:
			case MBA_LIP_RESET:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				ql_dbg(ql_dbg_async, vha, 0x5024,
				    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
				    i, *mb, vha);
				qla2x00_async_event(vha, rsp, mb);
				break;
			}

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vha->vref_count);
			wake_up(&vha->vref_waitq);
		}
		i++;
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	/*
	 * Physical port will do most of the abort and recovery work. We can
	 * just treat it as a loop down
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

	/*
	 * To exclusively reset vport, we need to log it out first.  Note: this
	 * control_vp can fail if ISP reset is already issued, this is
	 * expected, as the vp would be already logged out due to ISP reset.
	 */
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	ql_dbg(ql_dbg_taskm, vha, 0x801d,
	    "Scheduling enable of Vport %d.\n", vha->vp_idx);
	return qla24xx_enable_vp(vha);
}

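/* DPC handler for a single virtual port. */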
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
	    "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);

	qla2x00_do_work(vha);

	/* Check if Fw is ready to configure VP first */
	if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
		if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
			/* VP acquired. complete port configuration */
			ql_dbg(ql_dbg_dpc, vha, 0x4014,
			    "Configure VP scheduled.\n");
			qla24xx_configure_vp(vha);
			ql_dbg(ql_dbg_dpc, vha, 0x4015,
			    "Configure VP end.\n");
			return 0;
		}
	}

	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
		ql_dbg(ql_dbg_dpc, vha, 0x4016,
		    "FCPort update scheduled.\n");
		qla2x00_update_fcports(vha);
		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
		ql_dbg(ql_dbg_dpc, vha, 0x4017,
		    "FCPort update end.\n");
	}

	if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
	    atomic_read(&vha->loop_state) != LOOP_DOWN) {

		if (!vha->relogin_jif ||
		    time_after_eq(jiffies, vha->relogin_jif)) {
			vha->relogin_jif = jiffies + HZ;
			clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);

			ql_dbg(ql_dbg_dpc, vha, 0x4018,
			    "Relogin needed scheduled.\n");
			qla2x00_relogin(vha);
			ql_dbg(ql_dbg_dpc, vha, 0x4019,
			    "Relogin needed end.\n");
		}
	}

	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
			ql_dbg(ql_dbg_dpc, vha, 0x401a,
			    "Loop resync scheduled.\n");
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
			ql_dbg(ql_dbg_dpc, vha, 0x401b,
			    "Loop resync end.\n");
		}
	}

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
	    "Exiting %s.\n", __func__);
	return 0;
}

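/* Run the per-vport DPC handler for every vport on the physical adapter. */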
void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	unsigned long flags = 0;

	if (vha->vp_idx)
		return;
	if (list_empty(&ha->vp_list))
		return;

	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

	if (!(ha->current_topology & ISP_CFG_F))
		return;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		if (vp->vp_idx) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_do_dpc_vp(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

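/*
 * Validate an FC transport vport-create request: initiator role, NPIV
 * support in F/W and fabric, a unique WWPN and a free vport slot.
 */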
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	uint8_t port_name[WWN_SIZE];

	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
		return VPCERR_UNSUPPORTED;

	/* Check that the F/W and H/W support NPIV */
	if (!ha->flags.npiv_supported)
		return VPCERR_UNSUPPORTED;

	/* Check whether an NPIV-capable switch is present */
	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
		return VPCERR_NO_FABRIC_SUPP;

	/* Check for a unique WWPN */
	u64_to_wwn(fc_vport->port_name, port_name);
	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
		return VPCERR_BAD_WWN;
	vha = qla24xx_find_vhost_by_name(ha, port_name);
	if (vha)
		return VPCERR_BAD_WWN;

	/* Check against the max supported NPIV vport count */
	if (ha->num_vhosts > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa004,
		    "num_vhosts %d is bigger "
		    "than max_npiv_vports %d.\n",
		    ha->num_vhosts, ha->max_npiv_vports);
		return VPCERR_UNSUPPORTED;
	}
	return 0;
}

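/*
 * Allocate and initialize a scsi_qla_host for a new vport and register it
 * with the adapter. Returns NULL on failure.
 */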
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	struct scsi_host_template *sht = &qla2xxx_driver_template;
	struct Scsi_Host *host;

	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
		ql_log(ql_log_warn, vha, 0xa005,
		    "scsi_host_alloc() failed for vport.\n");
		return NULL;
	}

	host = vha->host;
	fc_vport->dd_data = vha;
	/* New host info */
	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	if (vha->vp_idx > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa006,
		    "Couldn't allocate vp_id.\n");
		goto create_vhost_failed;
	}
	vha->mgmt_svr_loop_id = 10 + vha->vp_idx;

	vha->dpc_flags = 0L;

	/*
	 * To fix the issue of processing a parent's RSCN for the vport before
	 * its SCR is complete.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_start_timer(vha, WATCH_INTERVAL);

	vha->req = base_vha->req;
	host->can_queue = base_vha->req->length + 128;
	host->cmd_per_lun = 3;
	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = ql2xmaxlun;
	host->unique_id = host->host_no;
	host->max_id = ha->max_fibre_devices;
	host->transportt = qla2xxx_transport_vport_template;

	ql_dbg(ql_dbg_vport, vha, 0xa007,
	    "Detect vport hba %ld at address = %p.\n",
	    vha->host_no, vha);

	vha->flags.init_done = 1;

	mutex_lock(&ha->vport_lock);
	set_bit(vha->vp_idx, ha->vp_idx_map);
	ha->cur_vport_count++;
	mutex_unlock(&ha->vport_lock);

	return vha;

create_vhost_failed:
	return NULL;
}

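/* Free a request queue's ring memory and release its queue id. */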
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = req->id;

	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
		sizeof(request_t), req->ring, req->dma);
	req->ring = NULL;
	req->dma = 0;
	if (que_id) {
		ha->req_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(req->outstanding_cmds);
	kfree(req);
	req = NULL;
}

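/* Free a response queue's IRQ and ring memory and release its queue id. */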
static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = rsp->id;

	if (rsp->msix && rsp->msix->have_irq) {
		free_irq(rsp->msix->vector, rsp->msix->handle);
		rsp->msix->have_irq = 0;
		rsp->msix->in_use = 0;
		rsp->msix->handle = NULL;
	}
	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
		sizeof(response_t), rsp->ring, rsp->dma);
	rsp->ring = NULL;
	rsp->dma = 0;
	if (que_id) {
		ha->rsp_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(rsp);
	rsp = NULL;
}

int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int ret = QLA_SUCCESS;

	if (req && vha->flags.qpairs_req_created) {
		req->options |= BIT_0;
		ret = qla25xx_init_req_que(vha, req);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_req_que(vha, req);
	}

	return ret;
}

int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int ret = QLA_SUCCESS;

	if (rsp && vha->flags.qpairs_rsp_created) {
		rsp->options |= BIT_0;
		ret = qla25xx_init_rsp_que(vha, rsp);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_rsp_que(vha, rsp);
	}

	return ret;
}

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair, *tqpair;

	if (ql2xmqsupport || ql2xnvmeenable) {
		list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
		    qp_list_elem)
			qla2xxx_delete_qpair(vha, qpair);
	} else {
		/* Delete request queues */
		for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
			req = ha->req_q_map[cnt];
			if (req && test_bit(cnt, ha->req_qid_map)) {
				ret = qla25xx_delete_req_que(vha, req);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00ea,
					    "Couldn't delete req que %d.\n",
					    req->id);
					return ret;
				}
			}
		}

		/* Delete response queues */
		for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
			rsp = ha->rsp_q_map[cnt];
			if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
				ret = qla25xx_delete_rsp_que(vha, rsp);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00eb,
					    "Couldn't delete rsp que %d.\n",
					    rsp->id);
					return ret;
				}
			}
		}
	}

	return ret;
}

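/* create request queue */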
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos, bool startqp)
{
	int ret = 0;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;
	uint32_t cnt;

	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (req == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00d9,
		    "Failed to allocate memory for request queue.\n");
		goto failed;
	}

	req->length = REQUEST_ENTRY_CNT_24XX;
	req->ring = dma_alloc_coherent(&ha->pdev->dev,
			(req->length + 1) * sizeof(request_t),
			&req->dma, GFP_KERNEL);
	if (req->ring == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00da,
		    "Failed to allocate memory for request_ring.\n");
		goto que_failed;
	}

	ret = qla2x00_alloc_outstanding_cmds(ha, req);
	if (ret != QLA_SUCCESS)
		goto que_failed;

	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
	if (que_id >= ha->max_req_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00db,
		    "No resources to create additional request queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->req_qid_map);
	ha->req_q_map[que_id] = req;
	req->rid = rid;
	req->vp_idx = vp_idx;
	req->qos = qos;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	ql_dbg(ql_dbg_init, base_vha, 0x00dc,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	if (rsp_que < 0)
		req->rsp = NULL;
	else
		req->rsp = ha->rsp_q_map[rsp_que];
	/* Use alternate PCI bus number */
	if (MSB(req->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(req->rid))
		options |= BIT_5;
	req->options = options;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
	    "options=0x%x.\n", req->options);
	ql_dbg(ql_dbg_init, base_vha, 0x00dd,
	    "options=0x%x.\n", req->options);
	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
		req->outstanding_cmds[cnt] = NULL;
	req->current_outstanding_cmd = 1;

	req->ring_ptr = req->ring;
	req->ring_index = 0;
	req->cnt = req->length;
	req->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	req->req_q_in = &reg->isp25mq.req_q_in;
	req->req_q_out = &reg->isp25mq.req_q_out;
	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
	req->out_ptr = (void *)(req->ring + req->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index,
	    req->cnt, req->id, req->max_q_depth);
	ql_dbg(ql_dbg_init, base_vha, 0x00de,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index, req->cnt,
	    req->id, req->max_q_depth);

	if (startqp) {
		ret = qla25xx_init_req_que(base_vha, req);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00df,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->req_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_req_created = 1;
	}

	return req->id;

que_failed:
	qla25xx_free_req_que(base_vha, req);
failed:
	return 0;
}

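/*
 * Work item: drain a qpair's response queue and complete any deferred
 * NVMe I/Os.
 */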
static void qla_do_work(struct work_struct *work)
{
	unsigned long flags;
	struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = qpair->hw;
	struct srb_iocb	*nvme, *nxt_nvme;

	spin_lock_irqsave(&qpair->qp_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, qpair->rsp);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	list_for_each_entry_safe(nvme, nxt_nvme, &qpair->nvme_done_list,
		    u.nvme.entry) {
		list_del_init(&nvme->u.nvme.entry);
		qla_nvme_cmpl_io(nvme);
	}
}

/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair, bool startqp)
{
	int ret = 0;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;

	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (rsp == NULL) {
		ql_log(ql_log_warn, base_vha, 0x0066,
		    "Failed to allocate memory for response queue.\n");
		goto failed;
	}

	rsp->length = RESPONSE_ENTRY_CNT_MQ;
	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
			(rsp->length + 1) * sizeof(response_t),
			&rsp->dma, GFP_KERNEL);
	if (rsp->ring == NULL) {
		ql_log(ql_log_warn, base_vha, 0x00e1,
		    "Failed to allocate memory for response ring.\n");
		goto que_failed;
	}

	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
	if (que_id >= ha->max_rsp_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00e2,
		    "No resources to create additional response queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->rsp_qid_map);

	rsp->msix = qpair->msix;

	ha->rsp_q_map[que_id] = rsp;
	rsp->rid = rid;
	rsp->vp_idx = vp_idx;
	rsp->hw = ha;
	ql_dbg(ql_dbg_init, base_vha, 0x00e4,
	    "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
	    que_id, rsp->rid, rsp->vp_idx, rsp->hw);
	/* Use alternate PCI bus number */
	if (MSB(rsp->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(rsp->rid))
		options |= BIT_5;
	/* Enable MSIX handshake mode for adapters without MSIX NACK support */
	if (!IS_MSIX_NACK_CAPABLE(ha))
		options |= BIT_6;

	/* Set option to indicate response queue creation */
	options |= BIT_1;

	rsp->options = options;
	rsp->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
	rsp->in_ptr = (void *)(rsp->ring + rsp->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);
	ql_dbg(ql_dbg_init, base_vha, 0x00e5,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);

	ret = qla25xx_request_irq(ha, qpair, qpair->msix,
	    QLA_MSIX_QPAIR_MULTIQ_RSP_Q);
	if (ret)
		goto que_failed;

	if (startqp) {
		ret = qla25xx_init_rsp_que(base_vha, rsp);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00e7,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->rsp_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_rsp_created = 1;
	}
	rsp->req = NULL;

	qla2x00_init_response_q_entries(rsp);
	if (qpair->hw->wq)
		INIT_WORK(&qpair->q_work, qla_do_work);
	return rsp->id;

que_failed:
	qla25xx_free_rsp_que(base_vha, rsp);
failed:
	return 0;
}

static void qla_ctrlvp_sp_done(void *s, int res)
{
	struct srb *sp = s;

	complete(&sp->comp);
	/* don't free sp here. Let the caller do the free */
}

/**
 * qla24xx_control_vp() - Enable a virtual port for given host
 * @vha:	adapter block pointer
 * @cmd:	command type to be sent for enable virtual port
 *
 * Return:	qla2xxx local function return status code.
 */
int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
{
	int rval = QLA_MEMORY_ALLOC_FAILED;
	struct qla_hw_data *ha = vha->hw;
	int	vp_index = vha->vp_idx;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	srb_t *sp;

	ql_dbg(ql_dbg_vport, vha, 0x10c1,
	    "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index);

	if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
		return QLA_PARAMETER_ERROR;

	sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_CTRL_VP;
	sp->name = "ctrl_vp";
	sp->done = qla_ctrlvp_sp_done;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
	sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_async, vha, 0xffff,
		    "%s: %s Failed submission. %x.\n",
		    __func__, sp->name, rval);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
	    sp->name, sp->handle);

	wait_for_completion(&sp->comp);
	rval = sp->rc;
	switch (rval) {
	case QLA_FUNCTION_TIMEOUT:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n",
		    __func__, sp->name, rval);
		break;
	case QLA_SUCCESS:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
		    __func__, sp->name);
		goto done_free_sp;
	default:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
		    __func__, sp->name, rval);
		goto done_free_sp;
	}
done:
	return rval;

done_free_sp:
	sp->free(sp);
	return rval;
}