/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"
#include "qla_target.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
	if (vha->vp_idx && vha->timer_active) {
		del_timer_sync(&vha->timer);
		vha->timer_active = 0;
	}
}

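/*
 * Reserve the first free vp_id from the HBA's vp_idx_map, bump num_vhosts
 * and link the new vport into ha->vp_list under the vport locks.
 */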
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Find an empty slot and assign a vp_id */
	mutex_lock(&ha->vport_lock);
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa000,
		    "vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports);
		mutex_unlock(&ha->vport_lock);
		return vp_id;
	}

	set_bit(vp_id, ha->vp_idx_map);
	ha->num_vhosts++;
	vha->vp_idx = vp_id;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_add_tail(&vha->list, &ha->vp_list);

	qlt_update_vp_map(vha, SET_VP_IDX);

	spin_unlock_irqrestore(&ha->vport_slock, flags);

	mutex_unlock(&ha->vport_lock);
	return vp_id;
}

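/*
 * Wait for outstanding references to the vport to drain, unlink it from
 * ha->vp_list and release its vp_id back to the vp_idx_map.
 */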
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	uint16_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	mutex_lock(&ha->vport_lock);
	/*
	 * Wait for all pending activities to finish before removing vport from
	 * the list.
	 * Lock needs to be held for safe removal from the list (it
	 * ensures no active vp_list traversal while the vport is removed
	 * from the queue)
	 */
	wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
	    10*HZ);

	spin_lock_irqsave(&ha->vport_slock, flags);
	if (atomic_read(&vha->vref_count)) {
		ql_dbg(ql_dbg_vport, vha, 0xfffa,
		    "vha->vref_count=%u timeout\n", vha->vref_count.counter);
		vha->vref_count = (atomic_t)ATOMIC_INIT(0);
	}
	list_del(&vha->list);
	qlt_update_vp_map(vha, RESET_VP_IDX);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	vp_id = vha->vp_idx;
	ha->num_vhosts--;
	clear_bit(vp_id, ha->vp_idx_map);

	mutex_unlock(&ha->vport_lock);
}

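/* Return the vport on ha->vp_list whose WWPN matches port_name, or NULL. */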
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
	scsi_qla_host_t *vha;
	struct scsi_qla_host *tvha;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Locate matching device in database. */
	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);
			return vha;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
	return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	vha = virtual adapter block pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	/*
	 * !!! NOTE !!!
	 * If this function is called in contexts other than vp create,
	 * disable or delete, make sure it is synchronized with the
	 * delete thread.
	 */
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		ql_dbg(ql_dbg_vport, vha, 0xa001,
		    "Marking port dead, loop_id=0x%04x : %x.\n",
		    fcport->loop_id, fcport->vha->vp_idx);

		qla2x00_mark_device_lost(vha, fcport, 0, 0);
		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
	}
}

int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
	unsigned long flags;
	int ret;

	ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	/* Remove port id from vp target map */
	spin_lock_irqsave(&vha->hw->vport_slock, flags);
	qlt_update_vp_map(vha, RESET_AL_PA);
	spin_unlock_irqrestore(&vha->hw->vport_slock, flags);

	qla2x00_mark_vp_devices_dead(vha);
	atomic_set(&vha->vp_state, VP_FAILED);
	vha->flags.management_server_logged_in = 0;
	if (ret == QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
	} else {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		return -1;
	}
	return 0;
}

int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN  ||
		atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
		!(ha->current_topology & ISP_CFG_F)) {
		vha->vp_err_state =  VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		ql_dbg(ql_dbg_taskm, vha, 0x800b,
		    "%s skip enable. loop_state %x topo %x\n",
		    __func__, base_vha->loop_state.counter,
		    ha->current_topology);

		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801a,
	    "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
	return 0;

enable_failed:
	ql_dbg(ql_dbg_taskm, vha, 0x801b,
	    "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
	return 1;
}

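/*
 * Enable RSCN reception for the vport (change request #3) and bring the
 * virtual port online via qla24xx_configure_vhba().
 */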
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
	struct fc_vport *fc_vport;
	int ret;

	fc_vport = vha->fc_vport;

	ql_dbg(ql_dbg_vport, vha, 0xa002,
	    "%s: change request #3.\n", __func__);
	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
	if (ret != QLA_SUCCESS) {
		ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
		    "receiving of RSCN requests: 0x%x.\n", ret);
		return;
	} else {
		/* Corresponds to SCR enabled */
		clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
	}

	vha->flags.online = 1;
	if (qla24xx_configure_vhba(vha))
		return;

	atomic_set(&vha->vp_state, VP_ACTIVE);
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

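/*
 * Forward loop, LIP, port-update and RSCN async events from the physical
 * port to every virtual port on ha->vp_list.
 */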
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha = rsp->hw;
	int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vha, &ha->vp_list, list) {
		if (vha->vp_idx) {
			atomic_inc(&vha->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_LOOP_UP:
			case MBA_LOOP_DOWN:
			case MBA_LIP_RESET:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				ql_dbg(ql_dbg_async, vha, 0x5024,
				    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
				    i, *mb, vha);
				qla2x00_async_event(vha, rsp, mb);
				break;
			}

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vha->vref_count);
			wake_up(&vha->vref_waitq);
		}
		i++;
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

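/* Handle ISP abort for a virtual port: mark the loop down and re-enable the vport. */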
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	/*
	 * Physical port will do most of the abort and recovery work. We can
	 * just treat it as a loop down
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

	/*
	 * To exclusively reset vport, we need to log it out first.  Note: this
	 * control_vp can fail if ISP reset is already issued, this is
	 * expected, as the vp would be already logged out due to ISP reset.
	 */
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	ql_dbg(ql_dbg_taskm, vha, 0x801d,
	    "Scheduling enable of Vport %d.\n", vha->vp_idx);
	return qla24xx_enable_vp(vha);
}

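/*
 * Per-vport DPC work: complete VP configuration, process fcport updates,
 * relogins and loop resync requests flagged in dpc_flags.
 */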
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
	    "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);

	/* Check if Fw is ready to configure VP first */
	if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
		if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
			/* VP acquired. complete port configuration */
			ql_dbg(ql_dbg_dpc, vha, 0x4014,
			    "Configure VP scheduled.\n");
			qla24xx_configure_vp(vha);
			ql_dbg(ql_dbg_dpc, vha, 0x4015,
			    "Configure VP end.\n");
			return 0;
		}
	}

	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
		ql_dbg(ql_dbg_dpc, vha, 0x4016,
		    "FCPort update scheduled.\n");
		qla2x00_update_fcports(vha);
		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
		ql_dbg(ql_dbg_dpc, vha, 0x4017,
		    "FCPort update end.\n");
	}

	if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
	    atomic_read(&vha->loop_state) != LOOP_DOWN) {

		if (!vha->relogin_jif ||
		    time_after_eq(jiffies, vha->relogin_jif)) {
			vha->relogin_jif = jiffies + HZ;
			clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);

			ql_dbg(ql_dbg_dpc, vha, 0x4018,
			    "Relogin needed scheduled.\n");
			qla24xx_post_relogin_work(vha);
		}
	}

	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
			ql_dbg(ql_dbg_dpc, vha, 0x401a,
			    "Loop resync scheduled.\n");
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
			ql_dbg(ql_dbg_dpc, vha, 0x401b,
			    "Loop resync end.\n");
		}
	}

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
	    "Exiting %s.\n", __func__);
	return 0;
}

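/* Run the DPC handler for every vport; only the physical port calls this. */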
void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	unsigned long flags = 0;

	if (vha->vp_idx)
		return;
	if (list_empty(&ha->vp_list))
		return;

	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

	if (!(ha->current_topology & ISP_CFG_F))
		return;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		if (vp->vp_idx) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_do_dpc_vp(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

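/*
 * Validate a vport create request: initiator role, NPIV support, fabric
 * support, unique WWPN and the max-npiv-vports limit.
 */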
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	uint8_t port_name[WWN_SIZE];

	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
		return VPCERR_UNSUPPORTED;

	/* Check that the F/W and H/W support NPIV */
	if (!ha->flags.npiv_supported)
		return VPCERR_UNSUPPORTED;

	/* Check whether an NPIV-capable switch is present */
	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
		return VPCERR_NO_FABRIC_SUPP;

	/* Check for a unique WWPN */
	u64_to_wwn(fc_vport->port_name, port_name);
	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
		return VPCERR_BAD_WWN;
	vha = qla24xx_find_vhost_by_name(ha, port_name);
	if (vha)
		return VPCERR_BAD_WWN;

	/* Check the max-npiv-vports limit */
	if (ha->num_vhosts > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa004,
		    "num_vhosts %u is bigger "
		    "than max_npiv_vports %u.\n",
		    ha->num_vhosts, ha->max_npiv_vports);
		return VPCERR_UNSUPPORTED;
	}
	return 0;
}

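/*
 * Allocate and initialize a scsi_qla_host for a new NPIV vport and bind it
 * to the FC transport vport; returns NULL on failure.
 */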
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	struct scsi_host_template *sht = &qla2xxx_driver_template;
	struct Scsi_Host *host;

	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
		ql_log(ql_log_warn, vha, 0xa005,
		    "scsi_host_alloc() failed for vport.\n");
		return(NULL);
	}

	host = vha->host;
	fc_vport->dd_data = vha;
	/* New host info */
	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	if (vha->vp_idx > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa006,
		    "Couldn't allocate vp_id.\n");
		goto create_vhost_failed;
	}
	vha->mgmt_svr_loop_id = NPH_MGMT_SERVER;

	vha->dpc_flags = 0L;

	/*
	 * To fix the issue of processing a parent's RSCN for the vport before
	 * its SCR is complete.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_start_timer(vha, WATCH_INTERVAL);

	vha->req = base_vha->req;
	host->can_queue = base_vha->req->length + 128;
	host->cmd_per_lun = 3;
	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = ql2xmaxlun;
	host->unique_id = host->host_no;
	host->max_id = ha->max_fibre_devices;
	host->transportt = qla2xxx_transport_vport_template;

	ql_dbg(ql_dbg_vport, vha, 0xa007,
	    "Detect vport hba %ld at address = %p.\n",
	    vha->host_no, vha);

	vha->flags.init_done = 1;

	mutex_lock(&ha->vport_lock);
	set_bit(vha->vp_idx, ha->vp_idx_map);
	ha->cur_vport_count++;
	mutex_unlock(&ha->vport_lock);

	return vha;

create_vhost_failed:
	return NULL;
}

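/* Free a request queue's DMA ring and release its slot in req_qid_map. */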
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = req->id;

	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
		sizeof(request_t), req->ring, req->dma);
	req->ring = NULL;
	req->dma = 0;
	if (que_id) {
		ha->req_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(req->outstanding_cmds);
	kfree(req);
	req = NULL;
}

static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = rsp->id;

	if (rsp->msix && rsp->msix->have_irq) {
		free_irq(rsp->msix->vector, rsp->msix->handle);
		rsp->msix->have_irq = 0;
		rsp->msix->in_use = 0;
		rsp->msix->handle = NULL;
	}
	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
		sizeof(response_t), rsp->ring, rsp->dma);
	rsp->ring = NULL;
	rsp->dma = 0;
	if (que_id) {
		ha->rsp_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(rsp);
	rsp = NULL;
}

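/* Shut down a request queue via qla25xx_init_req_que() with BIT_0 set, then free it. */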
int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int ret = QLA_SUCCESS;

	if (req && vha->flags.qpairs_req_created) {
		req->options |= BIT_0;
		ret = qla25xx_init_req_que(vha, req);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_req_que(vha, req);
	}

	return ret;
}

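/* Shut down a response queue via qla25xx_init_rsp_que() with BIT_0 set, then free it. */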
int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int ret = QLA_SUCCESS;

	if (rsp && vha->flags.qpairs_rsp_created) {
		rsp->options |= BIT_0;
		ret = qla25xx_init_rsp_que(vha, rsp);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_rsp_que(vha, rsp);
	}

	return ret;
}

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair, *tqpair;

	if (ql2xmqsupport || ql2xnvmeenable) {
		list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
		    qp_list_elem)
			qla2xxx_delete_qpair(vha, qpair);
	} else {
		/* Delete request queues */
		for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
			req = ha->req_q_map[cnt];
			if (req && test_bit(cnt, ha->req_qid_map)) {
				ret = qla25xx_delete_req_que(vha, req);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00ea,
					    "Couldn't delete req que %d.\n",
					    req->id);
					return ret;
				}
			}
		}

		/* Delete response queues */
		for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
			rsp = ha->rsp_q_map[cnt];
			if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
				ret = qla25xx_delete_rsp_que(vha, rsp);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00eb,
					    "Couldn't delete rsp que %d.\n",
					    rsp->id);
					return ret;
				}
			}
		}
	}

	return ret;
}

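/*
 * Allocate, register and optionally start an additional request queue for
 * multiqueue operation; returns the new queue id, or 0 on failure.
 */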
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos, bool startqp)
{
	int ret = 0;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;
	uint32_t cnt;

	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (req == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00d9,
		    "Failed to allocate memory for request queue.\n");
		goto failed;
	}

	req->length = REQUEST_ENTRY_CNT_24XX;
	req->ring = dma_alloc_coherent(&ha->pdev->dev,
			(req->length + 1) * sizeof(request_t),
			&req->dma, GFP_KERNEL);
	if (req->ring == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00da,
		    "Failed to allocate memory for request_ring.\n");
		goto que_failed;
	}

	ret = qla2x00_alloc_outstanding_cmds(ha, req);
	if (ret != QLA_SUCCESS)
		goto que_failed;

	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
	if (que_id >= ha->max_req_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00db,
		    "No resources to create additional request queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->req_qid_map);
	ha->req_q_map[que_id] = req;
	req->rid = rid;
	req->vp_idx = vp_idx;
	req->qos = qos;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	ql_dbg(ql_dbg_init, base_vha, 0x00dc,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	if (rsp_que < 0)
		req->rsp = NULL;
	else
		req->rsp = ha->rsp_q_map[rsp_que];
	/* Use alternate PCI bus number */
	if (MSB(req->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(req->rid))
		options |= BIT_5;
	req->options = options;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
	    "options=0x%x.\n", req->options);
	ql_dbg(ql_dbg_init, base_vha, 0x00dd,
	    "options=0x%x.\n", req->options);
	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
		req->outstanding_cmds[cnt] = NULL;
	req->current_outstanding_cmd = 1;

	req->ring_ptr = req->ring;
	req->ring_index = 0;
	req->cnt = req->length;
	req->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	req->req_q_in = &reg->isp25mq.req_q_in;
	req->req_q_out = &reg->isp25mq.req_q_out;
	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
	req->out_ptr = (void *)(req->ring + req->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index,
	    req->cnt, req->id, req->max_q_depth);
	ql_dbg(ql_dbg_init, base_vha, 0x00de,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index, req->cnt,
	    req->id, req->max_q_depth);

	if (startqp) {
		ret = qla25xx_init_req_que(base_vha, req);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00df,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->req_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_req_created = 1;
	}

	return req->id;

que_failed:
	qla25xx_free_req_que(base_vha, req);
failed:
	return 0;
}

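/* Qpair work item: process the response queue and complete finished NVMe IOs. */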
static void qla_do_work(struct work_struct *work)
{
	unsigned long flags;
	struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = qpair->hw;
	struct srb_iocb	*nvme, *nxt_nvme;

	spin_lock_irqsave(&qpair->qp_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, qpair->rsp);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	list_for_each_entry_safe(nvme, nxt_nvme, &qpair->nvme_done_list,
		    u.nvme.entry) {
		list_del_init(&nvme->u.nvme.entry);
		qla_nvme_cmpl_io(nvme);
	}
}

/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair, bool startqp)
{
	int ret = 0;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;

	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (rsp == NULL) {
		ql_log(ql_log_warn, base_vha, 0x0066,
		    "Failed to allocate memory for response queue.\n");
		goto failed;
	}

	rsp->length = RESPONSE_ENTRY_CNT_MQ;
	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
			(rsp->length + 1) * sizeof(response_t),
			&rsp->dma, GFP_KERNEL);
	if (rsp->ring == NULL) {
		ql_log(ql_log_warn, base_vha, 0x00e1,
		    "Failed to allocate memory for response ring.\n");
		goto que_failed;
	}

	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
	if (que_id >= ha->max_rsp_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00e2,
		    "No resources to create additional response queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->rsp_qid_map);

	rsp->msix = qpair->msix;

	ha->rsp_q_map[que_id] = rsp;
	rsp->rid = rid;
	rsp->vp_idx = vp_idx;
	rsp->hw = ha;
	ql_dbg(ql_dbg_init, base_vha, 0x00e4,
	    "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
	    que_id, rsp->rid, rsp->vp_idx, rsp->hw);
	/* Use alternate PCI bus number */
	if (MSB(rsp->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(rsp->rid))
		options |= BIT_5;
	/* Enable MSIX handshake mode for adapters lacking MSIX NACK capability */
	if (!IS_MSIX_NACK_CAPABLE(ha))
		options |= BIT_6;

	/* Set option to indicate response queue creation */
	options |= BIT_1;

	rsp->options = options;
	rsp->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
	rsp->in_ptr = (void *)(rsp->ring + rsp->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);
	ql_dbg(ql_dbg_init, base_vha, 0x00e5,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);

	ret = qla25xx_request_irq(ha, qpair, qpair->msix,
	    QLA_MSIX_QPAIR_MULTIQ_RSP_Q);
	if (ret)
		goto que_failed;

	if (startqp) {
		ret = qla25xx_init_rsp_que(base_vha, rsp);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00e7,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->rsp_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_rsp_created = 1;
	}
	rsp->req = NULL;

	qla2x00_init_response_q_entries(rsp);
	if (qpair->hw->wq)
		INIT_WORK(&qpair->q_work, qla_do_work);
	return rsp->id;

que_failed:
	qla25xx_free_rsp_que(base_vha, rsp);
failed:
	return 0;
}

static void qla_ctrlvp_sp_done(void *s, int res)
{
	struct srb *sp = s;

	complete(&sp->comp);
	/* don't free sp here. Let the caller do the free */
}

/**
 * qla24xx_control_vp() - Enable a virtual port for given host
 * @vha:	adapter block pointer
 * @cmd:	command type to be sent for enable virtual port
 *
 * Return:	qla2xxx local function return status code.
 */
int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
{
	int rval = QLA_MEMORY_ALLOC_FAILED;
	struct qla_hw_data *ha = vha->hw;
	int	vp_index = vha->vp_idx;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	srb_t *sp;

	ql_dbg(ql_dbg_vport, vha, 0x10c1,
	    "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index);

	if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
		return QLA_PARAMETER_ERROR;

	sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_CTRL_VP;
	sp->name = "ctrl_vp";
	sp->done = qla_ctrlvp_sp_done;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
	sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_async, vha, 0xffff,
		    "%s: %s Failed submission. %x.\n",
		    __func__, sp->name, rval);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
	    sp->name, sp->handle);

	wait_for_completion(&sp->comp);
	rval = sp->rc;
	switch (rval) {
	case QLA_FUNCTION_TIMEOUT:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n",
		    __func__, sp->name, rval);
		break;
	case QLA_SUCCESS:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
		    __func__, sp->name);
		goto done_free_sp;
	default:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
		    __func__, sp->name, rval);
		goto done_free_sp;
	}
done:
	return rval;

done_free_sp:
	sp->free(sp);
	return rval;
}