qla_mid.c 24.8 KB
Newer Older
1
/*
2
 * QLogic Fibre Channel HBA Driver
3
 * Copyright (c)  2003-2014 QLogic Corporation
4
 *
5
 * See LICENSE.qla2xxx for copyright and licensing details.
6 7
 */
#include "qla_def.h"
8
#include "qla_gbl.h"
9
#include "qla_target.h"
10 11 12

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
13
#include <linux/slab.h>
14 15 16 17 18 19 20 21 22
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

/*
 * qla2x00_vp_stop_timer - stop a virtual port's watchdog timer.
 * @vha: vport context
 *
 * Acts only on true virtual ports (vp_idx != 0) whose timer is marked
 * active; del_timer_sync() also waits out a concurrently-running handler.
 */
void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
	if (!vha->vp_idx || !vha->timer_active)
		return;

	del_timer_sync(&vha->timer);
	vha->timer_active = 0;
}

A
Adrian Bunk 已提交
29
/*
 * qla24xx_allocate_vp_id - reserve a free NPIV vport index for @vha.
 *
 * Under ha->vport_lock, scans ha->vp_idx_map for the first free index
 * (index 0 belongs to the physical port, so the search space is
 * 0..max_npiv_vports inclusive), links the vport onto ha->vp_list and
 * publishes the index in the target-mode VP map.
 *
 * Returns the allocated vp_id; a value > ha->max_npiv_vports signals
 * exhaustion (callers must check for that overflow value).
 */
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Find an empty slot and assign an vp_id */
	mutex_lock(&ha->vport_lock);
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
		/* No free index: return the out-of-range value as error. */
		ql_dbg(ql_dbg_vport, vha, 0xa000,
		    "vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports);
		mutex_unlock(&ha->vport_lock);
		return vp_id;
	}

	set_bit(vp_id, ha->vp_idx_map);
	ha->num_vhosts++;
	vha->vp_idx = vp_id;

	/* Make the vport visible to vp_list walkers. */
	spin_lock_irqsave(&ha->vport_slock, flags);
	list_add_tail(&vha->list, &ha->vp_list);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	/* Register the index in the target-mode map under hardware_lock. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_update_vp_map(vha, SET_VP_IDX);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	mutex_unlock(&ha->vport_lock);
	return vp_id;
}

/*
 * qla24xx_deallocate_vp_id - release @vha's NPIV index and unlink it.
 *
 * Waits up to 10 seconds for outstanding references (vref_count) to
 * drain, then removes the vport from ha->vp_list, clears its slot in
 * the target-mode VP map and in the vp index bitmap.
 */
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	uint16_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	mutex_lock(&ha->vport_lock);
	/*
	 * Wait for all pending activities to finish before removing vport from
	 * the list.
	 * Lock needs to be held for safe removal from the list (it
	 * ensures no active vp_list traversal while the vport is removed
	 * from the queue)
	 */
	wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
	    10*HZ);

	spin_lock_irqsave(&ha->vport_slock, flags);
	if (atomic_read(&vha->vref_count)) {
		/* Drain timed out: log and force the count to zero anyway. */
		ql_dbg(ql_dbg_vport, vha, 0xfffa,
		    "vha->vref_count=%u timeout\n", vha->vref_count.counter);
		vha->vref_count = (atomic_t)ATOMIC_INIT(0);
	}
	list_del(&vha->list);
	qlt_update_vp_map(vha, RESET_VP_IDX);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	vp_id = vha->vp_idx;
	ha->num_vhosts--;
	clear_bit(vp_id, ha->vp_idx_map);

	mutex_unlock(&ha->vport_lock);
}

A
Adrian Bunk 已提交
98
static scsi_qla_host_t *
99
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
100 101
{
	scsi_qla_host_t *vha;
102
	struct scsi_qla_host *tvha;
103
	unsigned long flags;
104

105
	spin_lock_irqsave(&ha->vport_slock, flags);
106
	/* Locate matching device in database. */
107
	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
108 109
		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);
110
			return vha;
111
		}
112
	}
113
	spin_unlock_irqrestore(&ha->vport_slock, flags);
114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129
	return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline: marks every fcport
 *	on the vport lost and then FCS_UNCONFIGURED.
 *
 * Input:
 *	vha = virtual port block pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 *	Intended for vp create/disable/delete paths (see NOTE below).
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	/*
	 * !!! NOTE !!!
	 * This function, if called in contexts other than vp create, disable
	 * or delete, please make sure this is synchronized with the
	 * delete thread.
	 */
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		ql_dbg(ql_dbg_vport, vha, 0xa001,
		    "Marking port dead, loop_id=0x%04x : %x.\n",
		    fcport->loop_id, fcport->vha->vp_idx);

		qla2x00_mark_device_lost(vha, fcport, 0, 0);
		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
	}
}

/*
 * qla24xx_disable_vp - take a virtual port offline.
 *
 * Sends the DISABLE/LOGO_ALL VP control command, forces the loop state
 * down, removes the port id from the target-mode VP map and marks all
 * of the vport's devices dead.
 *
 * Returns 0 on success; -1 if the VP control command failed (fc_vport
 * state is then set to FC_VPORT_FAILED).
 */
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
	unsigned long flags;
	int ret;

	ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	/* Remove port id from vp target map */
	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	qlt_update_vp_map(vha, RESET_AL_PA);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	qla2x00_mark_vp_devices_dead(vha);
	atomic_set(&vha->vp_state, VP_FAILED);
	vha->flags.management_server_logged_in = 0;
	if (ret == QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
	} else {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		return -1;
	}
	return 0;
}

/*
 * qla24xx_enable_vp - bring a virtual port online.
 *
 * Refuses to enable while the physical port's link is down/dead or the
 * topology is not fabric (NPIV requires an F-port); otherwise pushes the
 * vport configuration to the firmware via qla24xx_modify_vp_config().
 *
 * Returns 0 on success, 1 on failure.
 */
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN  ||
		atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
		!(ha->current_topology & ISP_CFG_F)) {
		vha->vp_err_state =  VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		ql_dbg(ql_dbg_taskm, vha, 0x800b,
		    "%s skip enable. loop_state %x topo %x\n",
		    __func__, base_vha->loop_state.counter,
		    ha->current_topology);

		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801a,
	    "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
	return 0;

enable_failed:
	ql_dbg(ql_dbg_taskm, vha, 0x801b,
	    "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
	return 1;
}

219
/*
 * qla24xx_configure_vp - finish vport bring-up after VP index acquisition.
 *
 * Enables RSCN reception for the vport (change request #3), performs the
 * fabric configuration via qla24xx_configure_vhba() and, on success,
 * marks the vport and its fc_vport ACTIVE. Failures return silently;
 * the vport simply does not transition to ACTIVE.
 */
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
	struct fc_vport *fc_vport;
	int ret;

	fc_vport = vha->fc_vport;

	ql_dbg(ql_dbg_vport, vha, 0xa002,
	    "%s: change request #3.\n", __func__);
	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
	if (ret != QLA_SUCCESS) {
		ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
		    "receiving of RSCN requests: 0x%x.\n", ret);
		return;
	} else {
		/* Corresponds to SCR enabled */
		clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
	}

	vha->flags.online = 1;
	/* Fabric login/registration; bail out (not ACTIVE) on failure. */
	if (qla24xx_configure_vhba(vha))
		return;

	atomic_set(&vha->vp_state, VP_ACTIVE);
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

/*
 * qla2x00_alert_all_vps - fan an async event out to the virtual ports.
 * @rsp: response queue the event arrived on
 * @mb:  mailbox registers describing the event (mb[0] = event code)
 *
 * Walks ha->vp_list under vport_slock. For each real vport (vp_idx != 0)
 * the lock is dropped while the event is delivered; vref_count is held
 * across the unlocked section to keep the vport from being deleted.
 * PORT_UPDATE/RSCN events are delivered only to the vport whose index
 * matches the low byte of mb[3]; the other listed events go to every
 * vport.
 */
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha = rsp->hw;
	int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vha, &ha->vp_list, list) {
		if (vha->vp_idx) {
			/* Pin the vport, then drop the lock for delivery. */
			atomic_inc(&vha->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_LOOP_UP:
			case MBA_LOOP_DOWN:
			case MBA_LIP_RESET:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
				ql_dbg(ql_dbg_async, vha, 0x5024,
				    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
				    i, *mb, vha);
				qla2x00_async_event(vha, rsp, mb);
				break;
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				/* Only the vport addressed by mb[3]. */
				if ((mb[3] & 0xff) == vha->vp_idx) {
					ql_dbg(ql_dbg_async, vha, 0x5024,
					    "Async_event for VP[%d], mb=0x%x vha=%p\n",
					    i, *mb, vha);
					qla2x00_async_event(vha, rsp, mb);
				}
				break;
			}

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vha->vref_count);
			wake_up(&vha->vref_waitq);
		}
		i++;
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

293
/*
 * qla2x00_vp_abort_isp - recover a virtual port across an ISP abort.
 *
 * Treats the abort as a loop-down for the vport, logs the vport out
 * (unless an ISP reset is already active) and schedules re-enable.
 *
 * Returns the result of qla24xx_enable_vp() (0 success, 1 failure).
 */
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	/*
	 * Physical port will do most of the abort and recovery work. We can
	 * just treat it as a loop down
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

	/*
	 * To exclusively reset vport, we need to log it out first.  Note: this
	 * control_vp can fail if ISP reset is already issued, this is
	 * expected, as the vp would be already logged out due to ISP reset.
	 */
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	ql_dbg(ql_dbg_taskm, vha, 0x801d,
	    "Scheduling enable of Vport %d.\n", vha->vp_idx);
	return qla24xx_enable_vp(vha);
}

A
Adrian Bunk 已提交
321
/*
 * qla2x00_do_dpc_vp - per-vport deferred (DPC) work handler.
 *
 * Processes the vport's pending dpc_flags items in order: initial VP
 * configuration (only once the base port reports VP_CONFIG_OK), fcport
 * updates, rate-limited relogin posting, reset-marker bookkeeping and
 * loop resync. Always returns 0.
 */
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
	    "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);

	/* Check if Fw is ready to configure VP first */
	if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
		if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
			/* VP acquired. complete port configuration */
			ql_dbg(ql_dbg_dpc, vha, 0x4014,
			    "Configure VP scheduled.\n");
			qla24xx_configure_vp(vha);
			ql_dbg(ql_dbg_dpc, vha, 0x4015,
			    "Configure VP end.\n");
			return 0;
		}
	}

	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
		ql_dbg(ql_dbg_dpc, vha, 0x4016,
		    "FCPort update scheduled.\n");
		qla2x00_update_fcports(vha);
		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
		ql_dbg(ql_dbg_dpc, vha, 0x4017,
		    "FCPort update end.\n");
	}

	if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
	    atomic_read(&vha->loop_state) != LOOP_DOWN) {

		/* Throttle relogin posting to at most once per HZ. */
		if (!vha->relogin_jif ||
		    time_after_eq(jiffies, vha->relogin_jif)) {
			vha->relogin_jif = jiffies + HZ;
			clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);

			ql_dbg(ql_dbg_dpc, vha, 0x4018,
			    "Relogin needed scheduled.\n");
			qla24xx_post_relogin_work(vha);
		}
	}

	/* Reset marker: consume the flag; no marker work needed for a vp. */
	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
			ql_dbg(ql_dbg_dpc, vha, 0x401a,
			    "Loop resync scheduled.\n");
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
			ql_dbg(ql_dbg_dpc, vha, 0x401b,
			    "Loop resync end.\n");
		}
	}

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
	    "Exiting %s.\n", __func__);
	return 0;
}

/*
 * qla2x00_do_dpc_all_vps - run the DPC handler for every virtual port.
 *
 * Only meaningful on the physical port (returns immediately when called
 * on a vport) and only in fabric topology. Uses the inc-ref/drop-lock
 * pattern so each vport stays alive while its DPC work runs outside
 * vport_slock.
 */
void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	unsigned long flags = 0;

	if (vha->vp_idx)
		return;
	if (list_empty(&ha->vp_list))
		return;

	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

	if (!(ha->current_topology & ISP_CFG_F))
		return;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		if (vp->vp_idx) {
			/* Pin the vport, drop the lock, do the work. */
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_do_dpc_vp(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
423 424
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440
	scsi_qla_host_t *vha;
	uint8_t port_name[WWN_SIZE];

	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
		return VPCERR_UNSUPPORTED;

	/* Check up the F/W and H/W support NPIV */
	if (!ha->flags.npiv_supported)
		return VPCERR_UNSUPPORTED;

	/* Check up whether npiv supported switch presented */
	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
		return VPCERR_NO_FABRIC_SUPP;

	/* Check up unique WWPN */
	u64_to_wwn(fc_vport->port_name, port_name);
441
	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
442
		return VPCERR_BAD_WWN;
443 444 445 446 447 448
	vha = qla24xx_find_vhost_by_name(ha, port_name);
	if (vha)
		return VPCERR_BAD_WWN;

	/* Check up max-npiv-supports */
	if (ha->num_vhosts > ha->max_npiv_vports) {
449 450 451 452
		ql_dbg(ql_dbg_vport, vha, 0xa004,
		    "num_vhosts %ud is bigger "
		    "than max_npiv_vports %ud.\n",
		    ha->num_vhosts, ha->max_npiv_vports);
453 454 455 456 457 458 459 460
		return VPCERR_UNSUPPORTED;
	}
	return 0;
}

/*
 * qla24xx_create_vhost - allocate and initialize a new NPIV vport host.
 * @fc_vport: FC transport vport this SCSI host will back
 *
 * Creates a scsi_qla_host, copies the WWNs from the transport vport,
 * reserves a vp index, inherits queue and limit settings from the base
 * port and starts the vport timer.
 *
 * Returns the new vha, or NULL on allocation failure / vp-id exhaustion.
 */
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	struct scsi_host_template *sht = &qla2xxx_driver_template;
	struct Scsi_Host *host;

	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
		ql_log(ql_log_warn, vha, 0xa005,
		    "scsi_host_alloc() failed for vport.\n");
		return(NULL);
	}

	host = vha->host;
	fc_vport->dd_data = vha;
	/* New host info */
	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	if (vha->vp_idx > ha->max_npiv_vports) {
		/* Out-of-range index is the allocator's exhaustion signal. */
		ql_dbg(ql_dbg_vport, vha, 0xa006,
		    "Couldn't allocate vp_id.\n");
		goto create_vhost_failed;
	}
	vha->mgmt_svr_loop_id = NPH_MGMT_SERVER;

	vha->dpc_flags = 0L;

	/*
	 * To fix the issue of processing a parent's RSCN for the vport before
	 * its SCR is complete.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_start_timer(vha, WATCH_INTERVAL);

	/* The vport shares the base port's request queue. */
	vha->req = base_vha->req;
	host->can_queue = base_vha->req->length + 128;
	host->cmd_per_lun = 3;
	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = ql2xmaxlun;
	host->unique_id = host->host_no;
	host->max_id = ha->max_fibre_devices;
	host->transportt = qla2xxx_transport_vport_template;

	ql_dbg(ql_dbg_vport, vha, 0xa007,
	    "Detect vport hba %ld at address = %p.\n",
	    vha->host_no, vha);

	vha->flags.init_done = 1;

	mutex_lock(&ha->vport_lock);
	set_bit(vha->vp_idx, ha->vp_idx_map);
	ha->cur_vport_count++;
	mutex_unlock(&ha->vport_lock);

	return vha;

create_vhost_failed:
	return NULL;
}
531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547

/*
 * qla25xx_free_req_que - release a request queue's resources.
 *
 * Frees the DMA ring, removes the queue from ha->req_q_map and clears
 * its qid bit (bitmap updates serialized by vport_lock), then frees the
 * outstanding-commands array and the queue structure.
 *
 * Note: the dead store "req = NULL" after kfree() was removed — req is
 * a by-value parameter, so the assignment had no effect.
 */
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = req->id;

	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
		sizeof(request_t), req->ring, req->dma);
	req->ring = NULL;
	req->dma = 0;
	if (que_id) {
		ha->req_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(req->outstanding_cmds);
	kfree(req);
}

/*
 * qla25xx_free_rsp_que - release a response queue's resources.
 *
 * Tears down the queue's MSI-X vector (if one was attached), frees the
 * DMA ring, removes the queue from ha->rsp_q_map and clears its qid bit
 * (bitmap updates serialized by vport_lock), then frees the structure.
 *
 * Note: the dead store "rsp = NULL" after kfree() was removed — rsp is
 * a by-value parameter, so the assignment had no effect.
 */
static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = rsp->id;

	if (rsp->msix && rsp->msix->have_irq) {
		free_irq(rsp->msix->vector, rsp->msix->handle);
		rsp->msix->have_irq = 0;
		rsp->msix->in_use = 0;
		rsp->msix->handle = NULL;
	}
	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
		sizeof(response_t), rsp->ring, rsp->dma);
	rsp->ring = NULL;
	rsp->dma = 0;
	if (que_id) {
		ha->rsp_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(rsp);
}

/*
 * qla25xx_delete_req_que - tear down an auxiliary request queue.
 *
 * Re-issues the queue init command with BIT_0 set in options (which,
 * per this delete path, asks firmware to remove the queue) and then
 * frees the host-side resources.
 *
 * Returns QLA_SUCCESS, or QLA_FUNCTION_FAILED if firmware rejected the
 * delete — in that case host resources are intentionally left intact.
 */
int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int ret = QLA_SUCCESS;

	if (req && vha->flags.qpairs_req_created) {
		req->options |= BIT_0;
		ret = qla25xx_init_req_que(vha, req);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_req_que(vha, req);
	}

	return ret;
}

596
/*
 * qla25xx_delete_rsp_que - tear down an auxiliary response queue.
 *
 * Mirrors qla25xx_delete_req_que(): re-issues the queue init command
 * with BIT_0 set in options to request firmware-side removal, then
 * frees the host-side resources.
 *
 * Returns QLA_SUCCESS, or QLA_FUNCTION_FAILED if firmware rejected the
 * delete — host resources are then left intact.
 */
int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int ret = QLA_SUCCESS;

	if (rsp && vha->flags.qpairs_rsp_created) {
		rsp->options |= BIT_0;
		ret = qla25xx_init_rsp_que(vha, rsp);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_rsp_que(vha, rsp);
	}

	return ret;
}

/*
 * qla25xx_delete_queues - delete all queues for a given vhost.
 *
 * In MQ/NVMe mode (ql2xmqsupport || ql2xnvmeenable) queues are owned by
 * qpairs, so those are deleted instead. Otherwise every additional
 * (index >= 1) request and response queue is deleted individually;
 * the first failure aborts the walk and its status is returned.
 */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair, *tqpair;

	if (ql2xmqsupport || ql2xnvmeenable) {
		list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
		    qp_list_elem)
			qla2xxx_delete_qpair(vha, qpair);
	} else {
		/* Delete request queues */
		for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
			req = ha->req_q_map[cnt];
			if (req && test_bit(cnt, ha->req_qid_map)) {
				ret = qla25xx_delete_req_que(vha, req);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00ea,
					    "Couldn't delete req que %d.\n",
					    req->id);
					return ret;
				}
			}
		}

		/* Delete response queues */
		for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
			rsp = ha->rsp_q_map[cnt];
			if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
				ret = qla25xx_delete_rsp_que(vha, rsp);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00eb,
					    "Couldn't delete rsp que %d.\n",
					    rsp->id);
					return ret;
				}
			}
		}
	}

	return ret;
}

/*
 * qla25xx_create_req_que - allocate and register an additional request queue.
 * @ha:      HBA context
 * @options: firmware queue option bits (BIT_4/BIT_5 added from rid below)
 * @vp_idx:  owning virtual port index
 * @rid:     queue resource id; MSB selects alternate PCI bus, LSB
 *           alternate PCI devfn
 * @rsp_que: index of an associated response queue, or negative for none
 * @qos:     QoS value recorded on the queue
 * @startqp: when true, immediately issue the firmware queue init
 *
 * Returns the new queue id (> 0) on success, 0 on any failure (all
 * partially-created resources are freed via qla25xx_free_req_que()).
 */
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos, bool startqp)
{
	int ret = 0;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;
	uint32_t cnt;

	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (req == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00d9,
		    "Failed to allocate memory for request queue.\n");
		goto failed;
	}

	req->length = REQUEST_ENTRY_CNT_24XX;
	req->ring = dma_alloc_coherent(&ha->pdev->dev,
			(req->length + 1) * sizeof(request_t),
			&req->dma, GFP_KERNEL);
	if (req->ring == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00da,
		    "Failed to allocate memory for request_ring.\n");
		goto que_failed;
	}

	ret = qla2x00_alloc_outstanding_cmds(ha, req);
	if (ret != QLA_SUCCESS)
		goto que_failed;

	/* Claim a free queue id under mq_lock. */
	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
	if (que_id >= ha->max_req_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00db,
		    "No resources to create additional request queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->req_qid_map);
	ha->req_q_map[que_id] = req;
	req->rid = rid;
	req->vp_idx = vp_idx;
	req->qos = qos;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	ql_dbg(ql_dbg_init, base_vha, 0x00dc,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	if (rsp_que < 0)
		req->rsp = NULL;
	else
		req->rsp = ha->rsp_q_map[rsp_que];
	/* Use alternate PCI bus number */
	if (MSB(req->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(req->rid))
		options |= BIT_5;
	req->options = options;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
	    "options=0x%x.\n", req->options);
	ql_dbg(ql_dbg_init, base_vha, 0x00dd,
	    "options=0x%x.\n", req->options);
	/* Slot 0 is reserved; commands start at index 1. */
	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
		req->outstanding_cmds[cnt] = NULL;
	req->current_outstanding_cmd = 1;

	req->ring_ptr = req->ring;
	req->ring_index = 0;
	req->cnt = req->length;
	req->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	req->req_q_in = &reg->isp25mq.req_q_in;
	req->req_q_out = &reg->isp25mq.req_q_out;
	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
	/* Firmware writes the out-index just past the ring entries. */
	req->out_ptr = (void *)(req->ring + req->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index,
	    req->cnt, req->id, req->max_q_depth);
	ql_dbg(ql_dbg_init, base_vha, 0x00de,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index, req->cnt,
	    req->id, req->max_q_depth);

	if (startqp) {
		ret = qla25xx_init_req_que(base_vha, req);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00df,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->req_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_req_created = 1;
	}

	return req->id;

que_failed:
	qla25xx_free_req_que(base_vha, req);
failed:
	return 0;
}

775 776
/*
 * qla_do_work - workqueue item that drains a qpair's response queue.
 *
 * Scheduled via INIT_WORK from qla25xx_create_rsp_que(); processes
 * completions on qpair->rsp under the qpair lock.
 */
static void qla_do_work(struct work_struct *work)
{
	unsigned long flags;
	struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = qpair->hw;

	spin_lock_irqsave(&qpair->qp_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, qpair->rsp);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);

}

789 790 791
/*
 * qla25xx_create_rsp_que - allocate and register an additional response
 * queue.
 * @ha:      HBA context
 * @options: firmware queue option bits (BIT_1 creation flag and others
 *           are added below)
 * @vp_idx:  owning virtual port index
 * @rid:     queue resource id; MSB selects alternate PCI bus, LSB
 *           alternate PCI devfn
 * @qpair:   qpair supplying the MSI-X vector and work item
 * @startqp: when true, immediately issue the firmware queue init
 *
 * Returns the new queue id (> 0) on success, 0 on any failure (partial
 * resources are freed via qla25xx_free_rsp_que()).
 */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair, bool startqp)
{
	int ret = 0;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;

	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (rsp == NULL) {
		ql_log(ql_log_warn, base_vha, 0x0066,
		    "Failed to allocate memory for response queue.\n");
		goto failed;
	}

	rsp->length = RESPONSE_ENTRY_CNT_MQ;
	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
			(rsp->length + 1) * sizeof(response_t),
			&rsp->dma, GFP_KERNEL);
	if (rsp->ring == NULL) {
		ql_log(ql_log_warn, base_vha, 0x00e1,
		    "Failed to allocate memory for response ring.\n");
		goto que_failed;
	}

	/* Claim a free queue id under mq_lock. */
	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
	if (que_id >= ha->max_rsp_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00e2,
		    "No resources to create additional request queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->rsp_qid_map);

	rsp->msix = qpair->msix;

	ha->rsp_q_map[que_id] = rsp;
	rsp->rid = rid;
	rsp->vp_idx = vp_idx;
	rsp->hw = ha;
	ql_dbg(ql_dbg_init, base_vha, 0x00e4,
	    "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
	    que_id, rsp->rid, rsp->vp_idx, rsp->hw);
	/* Use alternate PCI bus number */
	if (MSB(rsp->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(rsp->rid))
		options |= BIT_5;
	/* Enable MSIX handshake mode on for uncapable adapters */
	if (!IS_MSIX_NACK_CAPABLE(ha))
		options |= BIT_6;

	/* Set option to indicate response queue creation */
	options |= BIT_1;

	rsp->options = options;
	rsp->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
	/* Firmware writes the in-index just past the ring entries. */
	rsp->in_ptr = (void *)(rsp->ring + rsp->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);
	ql_dbg(ql_dbg_init, base_vha, 0x00e5,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);

	ret = qla25xx_request_irq(ha, qpair, qpair->msix,
	    QLA_MSIX_QPAIR_MULTIQ_RSP_Q);
	if (ret)
		goto que_failed;

	if (startqp) {
		ret = qla25xx_init_rsp_que(base_vha, rsp);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00e7,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->rsp_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_rsp_created = 1;
	}
	rsp->req = NULL;

	qla2x00_init_response_q_entries(rsp);
	if (qpair->hw->wq)
		INIT_WORK(&qpair->q_work, qla_do_work);
	return rsp->id;

que_failed:
	qla25xx_free_rsp_que(base_vha, rsp);
failed:
	return 0;
}
895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932

static void qla_ctrlvp_sp_done(void *s, int res)
{
	struct srb *sp = s;

	complete(&sp->comp);
	/* don't free sp here. Let the caller do the free */
}

/**
 * qla24xx_control_vp() - Enable a virtual port for given host
 * @vha:	adapter block pointer
 * @cmd:	command type to be sent for enable virtual port
 *
 * Builds an SRB_CTRL_VP request, submits it on the physical port and
 * waits for completion.
 *
 * Return:	qla2xxx local function return status code.
 */
int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
{
	int rval = QLA_MEMORY_ALLOC_FAILED;
	struct qla_hw_data *ha = vha->hw;
	int	vp_index = vha->vp_idx;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	srb_t *sp;

	ql_dbg(ql_dbg_vport, vha, 0x10c1,
	    "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index);

	/* Index 0 is the physical port; only real vports are valid here. */
	if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
		return QLA_PARAMETER_ERROR;

	sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_CTRL_VP;
	sp->name = "ctrl_vp";
	sp->done = qla_ctrlvp_sp_done;
	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
	sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
	sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_async, vha, 0xffff,
		    "%s: %s Failed submission. %x.\n",
		    __func__, sp->name, rval);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
	    sp->name, sp->handle);

	wait_for_completion(&sp->comp);
	rval = sp->rc;
	switch (rval) {
	case QLA_FUNCTION_TIMEOUT:
		/*
		 * NOTE(review): on timeout this falls through to done:
		 * without sp->free() — presumably the timeout/abort path
		 * owns the release; confirm before changing.
		 */
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n",
		    __func__, sp->name, rval);
		break;
	case QLA_SUCCESS:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
		    __func__, sp->name);
		goto done_free_sp;
	default:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
		    __func__, sp->name, rval);
		goto done_free_sp;
	}
done:
	return rval;

done_free_sp:
	sp->free(sp);
	return rval;
}