/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

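/*
 * qla2x00_vp_stop_timer
 *	Stop the timer of a virtual port, if it is running.
 */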
void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
	if (vha->vp_idx && vha->timer_active) {
		del_timer_sync(&vha->timer);
		vha->timer_active = 0;
	}
}

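/*
 * qla24xx_allocate_vp_id
 *	Find a free vp_id, mark it used in vp_idx_map and add the vport to
 *	the adapter's vp_list.  Returns a value greater than max_npiv_vports
 *	when no slot is available.
 */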
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Find an empty slot and assign a vp_id */
	mutex_lock(&ha->vport_lock);
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa000,
		    "vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports);
		mutex_unlock(&ha->vport_lock);
		return vp_id;
	}

	set_bit(vp_id, ha->vp_idx_map);
	ha->num_vhosts++;
	vha->vp_idx = vp_id;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_add_tail(&vha->list, &ha->vp_list);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	mutex_unlock(&ha->vport_lock);
	return vp_id;
}

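/*
 * qla24xx_deallocate_vp_id
 *	Wait for outstanding references to drain, remove the vport from
 *	vp_list and release its vp_id.
 */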
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	uint16_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	mutex_lock(&ha->vport_lock);
	/*
	 * Wait for all pending activities to finish before removing vport from
	 * the list.
	 * Lock needs to be held for safe removal from the list (it
	 * ensures no active vp_list traversal while the vport is removed
	 * from the queue)
	 */
	spin_lock_irqsave(&ha->vport_slock, flags);
	while (atomic_read(&vha->vref_count)) {
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		msleep(500);

		spin_lock_irqsave(&ha->vport_slock, flags);
	}
	list_del(&vha->list);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	vp_id = vha->vp_idx;
	ha->num_vhosts--;
	clear_bit(vp_id, ha->vp_idx_map);

	mutex_unlock(&ha->vport_lock);
}

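/*
 * qla24xx_find_vhost_by_name
 *	Return the vport whose WWPN matches port_name, or NULL if none.
 */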
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
	scsi_qla_host_t *vha;
	struct scsi_qla_host *tvha;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Locate matching device in database. */
	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);
			return vha;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
	return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	vha = virtual adapter block pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	/*
	 * !!! NOTE !!!
	 * If this function is called from contexts other than vp create,
	 * disable or delete, make sure it is synchronized with the
	 * delete thread.
	 */
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		ql_dbg(ql_dbg_vport, vha, 0xa001,
		    "Marking port dead, loop_id=0x%04x : %x.\n",
		    fcport->loop_id, fcport->vp_idx);

		qla2x00_mark_device_lost(vha, fcport, 0, 0);
		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
	}
}

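/*
 * qla24xx_disable_vp
 *	Log out and disable the virtual port and mark its devices dead.
 *	Returns 0 on success, -1 on failure.
 */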
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
	int ret;

	ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_mark_vp_devices_dead(vha);
	atomic_set(&vha->vp_state, VP_FAILED);
	vha->flags.management_server_logged_in = 0;
	if (ret == QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
	} else {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		return -1;
	}
	return 0;
}

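/*
 * qla24xx_enable_vp
 *	Enable the virtual port once the physical port is up on a fabric.
 *	Returns 0 on success, 1 on failure.
 */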
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN  ||
		atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
		!(ha->current_topology & ISP_CFG_F)) {
		vha->vp_err_state =  VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801a,
	    "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
	return 0;

enable_failed:
	ql_dbg(ql_dbg_taskm, vha, 0x801b,
	    "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
	return 1;
}

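/*
 * qla24xx_configure_vp
 *	Enable RSCN reception for the vport (change request #3) and
 *	complete the virtual HBA configuration.
 */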
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
	struct fc_vport *fc_vport;
	int ret;

	fc_vport = vha->fc_vport;

	ql_dbg(ql_dbg_vport, vha, 0xa002,
	    "%s: change request #3.\n", __func__);
	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
	if (ret != QLA_SUCCESS) {
		ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
		    "receiving of RSCN requests: 0x%x.\n", ret);
		return;
	} else {
		/* Corresponds to SCR enabled */
		clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
	}

	vha->flags.online = 1;
	if (qla24xx_configure_vhba(vha))
		return;

	atomic_set(&vha->vp_state, VP_ACTIVE);
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

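/*
 * qla2x00_alert_all_vps
 *	Forward selected asynchronous events from a response queue to
 *	every virtual port.
 */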
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha = rsp->hw;
	int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vha, &ha->vp_list, list) {
		if (vha->vp_idx) {
			atomic_inc(&vha->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_LOOP_UP:
			case MBA_LOOP_DOWN:
			case MBA_LIP_RESET:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				ql_dbg(ql_dbg_async, vha, 0x5024,
				    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
				    i, *mb, vha);
				qla2x00_async_event(vha, rsp, mb);
				break;
			}

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vha->vref_count);
		}
		i++;
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	/*
	 * Physical port will do most of the abort and recovery work. We can
	 * just treat it as a loop down
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

	/*
	 * To exclusively reset vport, we need to log it out first.  Note: this
	 * control_vp can fail if ISP reset is already issued, this is
	 * expected, as the vp would be already logged out due to ISP reset.
	 */
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	ql_dbg(ql_dbg_taskm, vha, 0x801d,
	    "Scheduling enable of Vport %d.\n", vha->vp_idx);
	return qla24xx_enable_vp(vha);
}

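/*
 * qla2x00_do_dpc_vp
 *	Per-vport DPC work: queued work, vport configuration, fcport
 *	updates, relogins and loop resync.
 */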
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
	    "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);

	qla2x00_do_work(vha);

	if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
		/* VP acquired. complete port configuration */
		ql_dbg(ql_dbg_dpc, vha, 0x4014,
		    "Configure VP scheduled.\n");
		qla24xx_configure_vp(vha);
		ql_dbg(ql_dbg_dpc, vha, 0x4015,
		    "Configure VP end.\n");
		return 0;
	}

	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
		ql_dbg(ql_dbg_dpc, vha, 0x4016,
		    "FCPort update scheduled.\n");
		qla2x00_update_fcports(vha);
		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
		ql_dbg(ql_dbg_dpc, vha, 0x4017,
		    "FCPort update end.\n");
	}

	if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
		!test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
		atomic_read(&vha->loop_state) != LOOP_DOWN) {

		ql_dbg(ql_dbg_dpc, vha, 0x4018,
		    "Relogin needed scheduled.\n");
		qla2x00_relogin(vha);
		ql_dbg(ql_dbg_dpc, vha, 0x4019,
		    "Relogin needed end.\n");
	}

	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
			ql_dbg(ql_dbg_dpc, vha, 0x401a,
			    "Loop resync scheduled.\n");
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
			ql_dbg(ql_dbg_dpc, vha, 0x401b,
			    "Loop resync end.\n");
		}
	}

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
	    "Exiting %s.\n", __func__);
	return 0;
}

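/*
 * qla2x00_do_dpc_all_vps
 *	Called from the physical port's DPC thread to run the vport DPC
 *	work for every virtual port on a fabric-attached adapter.
 */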
void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	unsigned long flags = 0;

	if (vha->vp_idx)
		return;
	if (list_empty(&ha->vp_list))
		return;

	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

	if (!(ha->current_topology & ISP_CFG_F))
		return;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		if (vp->vp_idx) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			ret = qla2x00_do_dpc_vp(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

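/*
 * qla24xx_vport_create_req_sanity_check
 *	Validate a vport-create request (initiator role, NPIV support in
 *	F/W and switch, unique WWPN, vport count within limits).
 *	Returns 0 or a VPCERR_* code.
 */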
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	uint8_t port_name[WWN_SIZE];

	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
		return VPCERR_UNSUPPORTED;

	/* Check that the F/W and H/W support NPIV */
	if (!ha->flags.npiv_supported)
		return VPCERR_UNSUPPORTED;

	/* Check whether an NPIV-capable switch is present */
	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
		return VPCERR_NO_FABRIC_SUPP;

	/* Check for a unique WWPN */
	u64_to_wwn(fc_vport->port_name, port_name);
	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
		return VPCERR_BAD_WWN;
	vha = qla24xx_find_vhost_by_name(ha, port_name);
	if (vha)
		return VPCERR_BAD_WWN;

	/* Check the max supported NPIV vport count */
	if (ha->num_vhosts > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa004,
		    "num_vhosts %u is bigger "
		    "than max_npiv_vports %u.\n",
		    ha->num_vhosts, ha->max_npiv_vports);
		return VPCERR_UNSUPPORTED;
	}
	return 0;
}

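/*
 * qla24xx_create_vhost
 *	Allocate and initialize a scsi_qla_host for a new NPIV vport.
 *	Returns the new vha, or NULL on failure.
 */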
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	struct scsi_host_template *sht = &qla2xxx_driver_template;
	struct Scsi_Host *host;

	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
		ql_log(ql_log_warn, vha, 0xa005,
		    "scsi_host_alloc() failed for vport.\n");
		return NULL;
	}

	host = vha->host;
	fc_vport->dd_data = vha;
	/* New host info */
	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	if (vha->vp_idx > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa006,
		    "Couldn't allocate vp_id.\n");
		goto create_vhost_failed;
	}
	vha->mgmt_svr_loop_id = 10 + vha->vp_idx;

	vha->dpc_flags = 0L;

	/*
	 * To fix the issue of processing a parent's RSCN for the vport before
	 * its SCR is complete.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);

	vha->req = base_vha->req;
	host->can_queue = base_vha->req->length + 128;
	host->this_id = 255;
	host->cmd_per_lun = 3;
	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = ql2xmaxlun;
	host->unique_id = host->host_no;
	host->max_id = ha->max_fibre_devices;
	host->transportt = qla2xxx_transport_vport_template;

	ql_dbg(ql_dbg_vport, vha, 0xa007,
	    "Detect vport hba %ld at address = %p.\n",
	    vha->host_no, vha);

	vha->flags.init_done = 1;

	mutex_lock(&ha->vport_lock);
	set_bit(vha->vp_idx, ha->vp_idx_map);
	ha->cur_vport_count++;
	mutex_unlock(&ha->vport_lock);

	return vha;

create_vhost_failed:
	return NULL;
}

static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = req->id;

	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
		sizeof(request_t), req->ring, req->dma);
	req->ring = NULL;
	req->dma = 0;
	if (que_id) {
		ha->req_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(req);
	req = NULL;
}

static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = rsp->id;

	if (rsp->msix && rsp->msix->have_irq) {
		free_irq(rsp->msix->vector, rsp);
		rsp->msix->have_irq = 0;
		rsp->msix->rsp = NULL;
	}
	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
		sizeof(response_t), rsp->ring, rsp->dma);
	rsp->ring = NULL;
	rsp->dma = 0;
	if (que_id) {
		ha->rsp_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(rsp);
	rsp = NULL;
}

int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int ret = -1;

	if (req) {
		req->options |= BIT_0;
		ret = qla25xx_init_req_que(vha, req);
	}
	if (ret == QLA_SUCCESS)
		qla25xx_free_req_que(vha, req);

	return ret;
}

static int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int ret = -1;

	if (rsp) {
		rsp->options |= BIT_0;
		ret = qla25xx_init_rsp_que(vha, rsp);
	}
	if (ret == QLA_SUCCESS)
		qla25xx_free_rsp_que(vha, rsp);

	return ret;
}

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;

	/* Delete request queues */
	for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
		req = ha->req_q_map[cnt];
		if (req) {
			ret = qla25xx_delete_req_que(vha, req);
			if (ret != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x00ea,
				    "Couldn't delete req que %d.\n",
				    req->id);
				return ret;
			}
		}
	}

	/* Delete response queues */
	for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
		rsp = ha->rsp_q_map[cnt];
		if (rsp) {
			ret = qla25xx_delete_rsp_que(vha, rsp);
			if (ret != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x00eb,
				    "Couldn't delete rsp que %d.\n",
				    rsp->id);
				return ret;
			}
		}
	}
	return ret;
}

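/*
 * qla25xx_create_req_que
 *	Allocate and initialize an additional request queue.
 *	Returns the queue id, or 0 on failure.
 */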
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
	uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
{
	int ret = 0;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t __iomem *reg;
	uint32_t cnt;

	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (req == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00d9,
		    "Failed to allocate memory for request queue.\n");
		goto failed;
	}

	req->length = REQUEST_ENTRY_CNT_24XX;
	req->ring = dma_alloc_coherent(&ha->pdev->dev,
			(req->length + 1) * sizeof(request_t),
			&req->dma, GFP_KERNEL);
	if (req->ring == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00da,
		    "Failed to allocate memory for request_ring.\n");
		goto que_failed;
	}

	mutex_lock(&ha->vport_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
	if (que_id >= ha->max_req_queues) {
		mutex_unlock(&ha->vport_lock);
		ql_log(ql_log_warn, base_vha, 0x00db,
		    "No resources to create additional request queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->req_qid_map);
	ha->req_q_map[que_id] = req;
	req->rid = rid;
	req->vp_idx = vp_idx;
	req->qos = qos;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	ql_dbg(ql_dbg_init, base_vha, 0x00dc,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	if (rsp_que < 0)
		req->rsp = NULL;
	else
		req->rsp = ha->rsp_q_map[rsp_que];
	/* Use alternate PCI bus number */
	if (MSB(req->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(req->rid))
		options |= BIT_5;
	req->options = options;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
	    "options=0x%x.\n", req->options);
	ql_dbg(ql_dbg_init, base_vha, 0x00dd,
	    "options=0x%x.\n", req->options);
	for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
		req->outstanding_cmds[cnt] = NULL;
	req->current_outstanding_cmd = 1;

	req->ring_ptr = req->ring;
	req->ring_index = 0;
	req->cnt = req->length;
	req->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
	mutex_unlock(&ha->vport_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index,
	    req->cnt, req->id, req->max_q_depth);
	ql_dbg(ql_dbg_init, base_vha, 0x00de,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index, req->cnt,
	    req->id, req->max_q_depth);

	ret = qla25xx_init_req_que(base_vha, req);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_fatal, base_vha, 0x00df,
		    "%s failed.\n", __func__);
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
		goto que_failed;
	}

	return req->id;

que_failed:
	qla25xx_free_req_que(base_vha, req);
failed:
	return 0;
}

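/*
 * qla_do_work
 *	Work-queue handler that processes a response queue outside of
 *	hard-interrupt context.
 */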
static void qla_do_work(struct work_struct *work)
{
	unsigned long flags;
	struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;

	spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, rsp);
	spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
}

/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
	uint8_t vp_idx, uint16_t rid, int req)
{
	int ret = 0;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t __iomem *reg;

	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (rsp == NULL) {
		ql_log(ql_log_warn, base_vha, 0x0066,
		    "Failed to allocate memory for response queue.\n");
		goto failed;
	}

	rsp->length = RESPONSE_ENTRY_CNT_MQ;
	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
			(rsp->length + 1) * sizeof(response_t),
			&rsp->dma, GFP_KERNEL);
	if (rsp->ring == NULL) {
		ql_log(ql_log_warn, base_vha, 0x00e1,
		    "Failed to allocate memory for response ring.\n");
		goto que_failed;
	}

	mutex_lock(&ha->vport_lock);
	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
	if (que_id >= ha->max_rsp_queues) {
		mutex_unlock(&ha->vport_lock);
		ql_log(ql_log_warn, base_vha, 0x00e2,
		    "No resources to create additional response queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->rsp_qid_map);

	if (ha->flags.msix_enabled)
		rsp->msix = &ha->msix_entries[que_id + 1];
	else
		ql_log(ql_log_warn, base_vha, 0x00e3,
		    "MSIX not enabled.\n");

	ha->rsp_q_map[que_id] = rsp;
	rsp->rid = rid;
	rsp->vp_idx = vp_idx;
	rsp->hw = ha;
	ql_dbg(ql_dbg_init, base_vha, 0x00e4,
	    "queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
	    que_id, rsp->rid, rsp->vp_idx, rsp->hw);
	/* Use alternate PCI bus number */
	if (MSB(rsp->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(rsp->rid))
		options |= BIT_5;
	/* Enable MSIX handshake mode on non-MSIX-NACK-capable adapters */
	if (!IS_MSIX_NACK_CAPABLE(ha))
		options |= BIT_6;

	rsp->options = options;
	rsp->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
	mutex_unlock(&ha->vport_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);
	ql_dbg(ql_dbg_init, base_vha, 0x00e5,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);

	ret = qla25xx_request_irq(rsp);
	if (ret)
		goto que_failed;

	ret = qla25xx_init_rsp_que(base_vha, rsp);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_fatal, base_vha, 0x00e7,
		    "%s failed.\n", __func__);
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
		goto que_failed;
	}
	if (req >= 0)
		rsp->req = ha->req_q_map[req];
	else
		rsp->req = NULL;

	qla2x00_init_response_q_entries(rsp);
	if (rsp->hw->wq)
		INIT_WORK(&rsp->q_work, qla_do_work);
	return rsp->id;

que_failed:
	qla25xx_free_rsp_que(base_vha, rsp);
failed:
	return 0;
}