/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
	if (vha->vp_idx && vha->timer_active) {
		del_timer_sync(&vha->timer);
		del_timer_sync(&vha->timer);
		vha->timer_active = 0;
	}
}

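/*
 * qla24xx_allocate_vp_id
 *	Finds the first free slot in the vp_idx map, claims it and links the
 *	vport onto the hardware's vp_list.
 *
 * Input:
 *	vha = virtual port block pointer.
 *
 * Return:
 *	Allocated vp_id, or a value greater than max_npiv_vports when no
 *	free slot is available.
 */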
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Find an empty slot and assign a vp_id */
	mutex_lock(&ha->vport_lock);
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa000,
		    "vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports);
		mutex_unlock(&ha->vport_lock);
		return vp_id;
	}

	set_bit(vp_id, ha->vp_idx_map);
	ha->num_vhosts++;
	vha->vp_idx = vp_id;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_add_tail(&vha->list, &ha->vp_list);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	mutex_unlock(&ha->vport_lock);
	return vp_id;
}

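/*
 * qla24xx_deallocate_vp_id
 *	Waits for all outstanding references to the vport to drain, removes
 *	it from the hardware's vp_list and releases its vp_id slot.
 *
 * Input:
 *	vha = virtual port block pointer.
 */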
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	uint16_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	mutex_lock(&ha->vport_lock);
	/*
	 * Wait for all pending activities to finish before removing vport from
	 * the list.
	 * Lock needs to be held for safe removal from the list (it
	 * ensures no active vp_list traversal while the vport is removed
	 * from the queue)
	 */
	spin_lock_irqsave(&ha->vport_slock, flags);
	while (atomic_read(&vha->vref_count)) {
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		msleep(500);

		spin_lock_irqsave(&ha->vport_slock, flags);
	}
	list_del(&vha->list);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	vp_id = vha->vp_idx;
	ha->num_vhosts--;
	clear_bit(vp_id, ha->vp_idx_map);

	mutex_unlock(&ha->vport_lock);
}

static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
	scsi_qla_host_t *vha;
	struct scsi_qla_host *tvha;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Locate matching device in database. */
	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);
			return vha;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
	return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fcport = port structure pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	/*
	 * !!! NOTE !!!
	 * If this function is called from contexts other than vp create,
	 * disable or delete, make sure it is synchronized with the
	 * delete thread.
	 */
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		ql_dbg(ql_dbg_vport, vha, 0xa001,
		    "Marking port dead, loop_id=0x%04x : %x.\n",
		    fcport->loop_id, fcport->vp_idx);

		qla2x00_mark_device_lost(vha, fcport, 0, 0);
		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
	}
}

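/*
 * qla24xx_disable_vp
 *	Logs out and disables the vport in firmware, marks its devices dead
 *	and reports the resulting state through the FC transport.
 *
 * Return:
 *	0 on success, -1 if the firmware command failed.
 */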
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
	int ret;

	ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_mark_vp_devices_dead(vha);
	atomic_set(&vha->vp_state, VP_FAILED);
	vha->flags.management_server_logged_in = 0;
	if (ret == QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
	} else {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		return -1;
	}
	return 0;
}

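/*
 * qla24xx_enable_vp
 *	Enables the vport in firmware, provided the physical port is up
 *	and operating in F-port (fabric) topology.
 *
 * Return:
 *	0 on success, 1 on failure.
 */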
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN  ||
		atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
		!(ha->current_topology & ISP_CFG_F)) {
		vha->vp_err_state =  VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801a,
	    "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
	return 0;

enable_failed:
	ql_dbg(ql_dbg_taskm, vha, 0x801b,
	    "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
	return 1;
}

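/*
 * qla24xx_configure_vp
 *	Enables RSCN reception for the vport (change request #3) and brings
 *	the vport to the ACTIVE state.
 */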
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
	struct fc_vport *fc_vport;
	int ret;

	fc_vport = vha->fc_vport;

	ql_dbg(ql_dbg_vport, vha, 0xa002,
	    "%s: change request #3.\n", __func__);
	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
	if (ret != QLA_SUCCESS) {
		ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
		    "receiving of RSCN requests: 0x%x.\n", ret);
		return;
	} else {
		/* Corresponds to SCR enabled */
		clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
	}

	vha->flags.online = 1;
	if (qla24xx_configure_vhba(vha))
		return;

	atomic_set(&vha->vp_state, VP_ACTIVE);
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

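/*
 * qla2x00_alert_all_vps
 *	Forwards loop- and port-related async events to every vport,
 *	holding a reference on each vport so it cannot be deleted while
 *	the event is being processed.
 */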
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha = rsp->hw;
	int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vha, &ha->vp_list, list) {
		if (vha->vp_idx) {
			atomic_inc(&vha->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_LOOP_UP:
			case MBA_LOOP_DOWN:
			case MBA_LIP_RESET:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				ql_dbg(ql_dbg_async, vha, 0x5024,
				    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
				    i, *mb, vha);
				qla2x00_async_event(vha, rsp, mb);
				break;
			}

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vha->vref_count);
		}
		i++;
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

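/*
 * qla2x00_vp_abort_isp
 *	Handles ISP abort for a vport: treats the abort as a loop down,
 *	logs the vport out and re-enables it.
 */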
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	/*
	 * Physical port will do most of the abort and recovery work. We can
	 * just treat it as a loop down
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

	/*
	 * To exclusively reset the vport, we need to log it out first.
	 * Note: this control_vp can fail if an ISP reset has already been
	 * issued; this is expected, as the vp would already have been
	 * logged out due to the ISP reset.
	 */
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	ql_dbg(ql_dbg_taskm, vha, 0x801d,
	    "Scheduling enable of Vport %d.\n", vha->vp_idx);
	return qla24xx_enable_vp(vha);
}

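/*
 * qla2x00_do_dpc_vp
 *	Per-vport DPC handler: completes port configuration and services
 *	the fcport-update, relogin and loop-resync work flagged in
 *	vp_flags/dpc_flags.
 */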
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
	ql_dbg(ql_dbg_dpc, vha, 0x4012,
	    "Entering %s.\n", __func__);
	ql_dbg(ql_dbg_dpc, vha, 0x4013,
	    "vp_flags: 0x%lx.\n", vha->vp_flags);

	qla2x00_do_work(vha);

	if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
		/* VP acquired. complete port configuration */
		ql_dbg(ql_dbg_dpc, vha, 0x4014,
		    "Configure VP scheduled.\n");
		qla24xx_configure_vp(vha);
		ql_dbg(ql_dbg_dpc, vha, 0x4015,
		    "Configure VP end.\n");
		return 0;
	}

	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
		ql_dbg(ql_dbg_dpc, vha, 0x4016,
		    "FCPort update scheduled.\n");
		qla2x00_update_fcports(vha);
		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
		ql_dbg(ql_dbg_dpc, vha, 0x4017,
		    "FCPort update end.\n");
	}

	if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
		!test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
		atomic_read(&vha->loop_state) != LOOP_DOWN) {

		ql_dbg(ql_dbg_dpc, vha, 0x4018,
		    "Relogin needed scheduled.\n");
		qla2x00_relogin(vha);
		ql_dbg(ql_dbg_dpc, vha, 0x4019,
		    "Relogin needed end.\n");
	}

	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
			ql_dbg(ql_dbg_dpc, vha, 0x401a,
			    "Loop resync scheduled.\n");
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
			ql_dbg(ql_dbg_dpc, vha, 0x401b,
			    "Loop resync end.\n");
		}
	}

	ql_dbg(ql_dbg_dpc, vha, 0x401c,
	    "Exiting %s.\n", __func__);
	return 0;
}

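/*
 * qla2x00_do_dpc_all_vps
 *	Runs the DPC handler for every vport on the physical port. Only
 *	meaningful on the base port in F-port (fabric) topology.
 */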
void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	unsigned long flags = 0;

	if (vha->vp_idx)
		return;
	if (list_empty(&ha->vp_list))
		return;

	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

	if (!(ha->current_topology & ISP_CFG_F))
		return;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		if (vp->vp_idx) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			ret = qla2x00_do_dpc_vp(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

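/*
 * qla24xx_vport_create_req_sanity_check
 *	Validates a vport-create request: initiator role, NPIV support in
 *	F/W and H/W, fabric support, a unique WWPN and the max-vports limit.
 *
 * Return:
 *	0 on success, or a VPCERR_* code describing the failure.
 */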
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	uint8_t port_name[WWN_SIZE];

	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
		return VPCERR_UNSUPPORTED;

	/* Check that the F/W and H/W support NPIV */
	if (!ha->flags.npiv_supported)
		return VPCERR_UNSUPPORTED;

	/* Check whether an NPIV-capable switch is present */
	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
		return VPCERR_NO_FABRIC_SUPP;

	/* Check that the WWPN is unique */
	u64_to_wwn(fc_vport->port_name, port_name);
	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
		return VPCERR_BAD_WWN;
	vha = qla24xx_find_vhost_by_name(ha, port_name);
	if (vha)
		return VPCERR_BAD_WWN;

	/* Check the max-npiv-vports limit */
	if (ha->num_vhosts > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa004,
		    "num_vhosts %u is bigger "
		    "than max_npiv_vports %u.\n",
		    ha->num_vhosts, ha->max_npiv_vports);
		return VPCERR_UNSUPPORTED;
	}
	return 0;
}

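/*
 * qla24xx_create_vhost
 *	Allocates and initializes a new scsi_qla_host for the vport,
 *	assigns a vp_id and inherits queue and host parameters from the
 *	base port.
 *
 * Return:
 *	Pointer to the new vport host, or NULL on failure.
 */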
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	struct scsi_host_template *sht = &qla2xxx_driver_template;
	struct Scsi_Host *host;

	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
		ql_log(ql_log_warn, vha, 0xa005,
		    "scsi_host_alloc() failed for vport.\n");
		return NULL;
	}

	host = vha->host;
	fc_vport->dd_data = vha;
	/* New host info */
	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	if (vha->vp_idx > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa006,
		    "Couldn't allocate vp_id.\n");
		goto create_vhost_failed;
	}
	vha->mgmt_svr_loop_id = 10 + vha->vp_idx;

	vha->dpc_flags = 0L;

	/*
	 * To fix the issue of processing a parent's RSCN for the vport before
	 * its SCR is complete.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);

	vha->req = base_vha->req;
	host->can_queue = base_vha->req->length + 128;
	host->this_id = 255;
	host->cmd_per_lun = 3;
	if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = ql2xmaxlun;
	host->unique_id = host->host_no;
	host->max_id = MAX_TARGETS_2200;
	host->transportt = qla2xxx_transport_vport_template;

	ql_dbg(ql_dbg_vport, vha, 0xa007,
	    "Detect vport hba %ld at address = %p.\n",
	    vha->host_no, vha);

	vha->flags.init_done = 1;

	mutex_lock(&ha->vport_lock);
	set_bit(vha->vp_idx, ha->vp_idx_map);
	ha->cur_vport_count++;
	mutex_unlock(&ha->vport_lock);

	return vha;

create_vhost_failed:
	return NULL;
}

static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = req->id;

	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
		sizeof(request_t), req->ring, req->dma);
	req->ring = NULL;
	req->dma = 0;
	if (que_id) {
		ha->req_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(req);
	req = NULL;
}

static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = rsp->id;

	if (rsp->msix && rsp->msix->have_irq) {
		free_irq(rsp->msix->vector, rsp);
		rsp->msix->have_irq = 0;
		rsp->msix->rsp = NULL;
	}
	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
		sizeof(response_t), rsp->ring, rsp->dma);
	rsp->ring = NULL;
	rsp->dma = 0;
	if (que_id) {
		ha->rsp_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(rsp);
	rsp = NULL;
}

int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int ret = -1;

	if (req) {
		req->options |= BIT_0;
		ret = qla25xx_init_req_que(vha, req);
	}
	if (ret == QLA_SUCCESS)
		qla25xx_free_req_que(vha, req);

	return ret;
}

static int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int ret = -1;

	if (rsp) {
		rsp->options |= BIT_0;
		ret = qla25xx_init_rsp_que(vha, rsp);
	}
	if (ret == QLA_SUCCESS)
		qla25xx_free_rsp_que(vha, rsp);

	return ret;
}

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;

	/* Delete request queues */
	for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
		req = ha->req_q_map[cnt];
		if (req) {
			ret = qla25xx_delete_req_que(vha, req);
			if (ret != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x00ea,
				    "Couldn't delete req que %d.\n",
				    req->id);
				return ret;
			}
		}
	}

	/* Delete response queues */
	for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
		rsp = ha->rsp_q_map[cnt];
		if (rsp) {
			ret = qla25xx_delete_rsp_que(vha, rsp);
			if (ret != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x00eb,
				    "Couldn't delete rsp que %d.\n",
				    rsp->id);
				return ret;
			}
		}
	}
	return ret;
}

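/*
 * qla25xx_create_req_que
 *	Allocates and initializes an additional request queue: DMA ring,
 *	queue id from the req_qid_map, and firmware-side initialization.
 *
 * Return:
 *	New queue id on success, 0 on failure.
 */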
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
	uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
{
	int ret = 0;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t __iomem *reg;
	uint32_t cnt;

	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (req == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00d9,
		    "Failed to allocate memory for request queue.\n");
		goto failed;
	}

	req->length = REQUEST_ENTRY_CNT_24XX;
	req->ring = dma_alloc_coherent(&ha->pdev->dev,
			(req->length + 1) * sizeof(request_t),
			&req->dma, GFP_KERNEL);
	if (req->ring == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00da,
		    "Failed to allocate memory for request_ring.\n");
		goto que_failed;
	}

	mutex_lock(&ha->vport_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
	if (que_id >= ha->max_req_queues) {
		mutex_unlock(&ha->vport_lock);
		ql_log(ql_log_warn, base_vha, 0x00db,
		    "No resources to create additional request queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->req_qid_map);
	ha->req_q_map[que_id] = req;
	req->rid = rid;
	req->vp_idx = vp_idx;
	req->qos = qos;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	ql_dbg(ql_dbg_init, base_vha, 0x00dc,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	if (rsp_que < 0)
		req->rsp = NULL;
	else
		req->rsp = ha->rsp_q_map[rsp_que];
	/* Use alternate PCI bus number */
	if (MSB(req->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(req->rid))
		options |= BIT_5;
	req->options = options;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
	    "options=0x%x.\n", req->options);
	ql_dbg(ql_dbg_init, base_vha, 0x00dd,
	    "options=0x%x.\n", req->options);
	for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
		req->outstanding_cmds[cnt] = NULL;
	req->current_outstanding_cmd = 1;

	req->ring_ptr = req->ring;
	req->ring_index = 0;
	req->cnt = req->length;
	req->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
	mutex_unlock(&ha->vport_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index,
	    req->cnt, req->id, req->max_q_depth);
	ql_dbg(ql_dbg_init, base_vha, 0x00de,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index, req->cnt,
	    req->id, req->max_q_depth);

	ret = qla25xx_init_req_que(base_vha, req);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_fatal, base_vha, 0x00df,
		    "%s failed.\n", __func__);
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
		goto que_failed;
	}

	return req->id;

que_failed:
	qla25xx_free_req_que(base_vha, req);
failed:
	return 0;
}

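/*
 * qla_do_work
 *	Work-queue callback that drains a response queue under the
 *	hardware lock.
 */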
static void qla_do_work(struct work_struct *work)
{
	unsigned long flags;
	struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;

	spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, rsp);
	spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
}

/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
	uint8_t vp_idx, uint16_t rid, int req)
{
	int ret = 0;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t __iomem *reg;

	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (rsp == NULL) {
		ql_log(ql_log_warn, base_vha, 0x0066,
		    "Failed to allocate memory for response queue.\n");
		goto failed;
	}

	rsp->length = RESPONSE_ENTRY_CNT_MQ;
	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
			(rsp->length + 1) * sizeof(response_t),
			&rsp->dma, GFP_KERNEL);
	if (rsp->ring == NULL) {
		ql_log(ql_log_warn, base_vha, 0x00e1,
		    "Failed to allocate memory for response ring.\n");
		goto que_failed;
	}

	mutex_lock(&ha->vport_lock);
	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
	if (que_id >= ha->max_rsp_queues) {
		mutex_unlock(&ha->vport_lock);
		ql_log(ql_log_warn, base_vha, 0x00e2,
		    "No resources to create additional response queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->rsp_qid_map);

	if (ha->flags.msix_enabled)
		rsp->msix = &ha->msix_entries[que_id + 1];
	else
		ql_log(ql_log_warn, base_vha, 0x00e3,
		    "MSIX not enabled.\n");

	ha->rsp_q_map[que_id] = rsp;
	rsp->rid = rid;
	rsp->vp_idx = vp_idx;
	rsp->hw = ha;
	ql_dbg(ql_dbg_init, base_vha, 0x00e4,
	    "queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
	    que_id, rsp->rid, rsp->vp_idx, rsp->hw);
	/* Use alternate PCI bus number */
	if (MSB(rsp->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(rsp->rid))
		options |= BIT_5;
	/* Enable MSIX handshake mode on adapters that are not MSI-X NACK capable */
	if (!IS_MSIX_NACK_CAPABLE(ha))
		options |= BIT_6;

	rsp->options = options;
	rsp->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
	mutex_unlock(&ha->vport_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);
	ql_dbg(ql_dbg_init, base_vha, 0x00e5,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);

	ret = qla25xx_request_irq(rsp);
	if (ret)
		goto que_failed;

	ret = qla25xx_init_rsp_que(base_vha, rsp);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_fatal, base_vha, 0x00e7,
		    "%s failed.\n", __func__);
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
		goto que_failed;
	}
	if (req >= 0)
		rsp->req = ha->req_q_map[req];
	else
		rsp->req = NULL;

	qla2x00_init_response_q_entries(rsp);
	if (rsp->hw->wq)
		INIT_WORK(&rsp->q_work, qla_do_work);
	return rsp->id;

que_failed:
	qla25xx_free_rsp_que(base_vha, rsp);
failed:
	return 0;
}