/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

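/*
 * qla2x00_vp_stop_timer
 *	Stops the timer of a virtual port, if one is active.
 *
 * Input:
 *	vha = virtual host block pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 *	Process context (calls del_timer_sync()).
 */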
void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
	if (vha->vp_idx && vha->timer_active) {
		del_timer_sync(&vha->timer);
		vha->timer_active = 0;
	}
}

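/*
 * qla24xx_allocate_vp_id
 *	Finds a free vp_id in the adapter's vp_idx_map, marks it in use,
 *	and adds the vport to the adapter's vp_list.
 *
 * Input:
 *	vha = virtual host block pointer.
 *
 * Return:
 *	Allocated vp_id; a value greater than max_npiv_vports if no slot
 *	is free.
 */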
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Find an empty slot and assign a vp_id */
	mutex_lock(&ha->vport_lock);
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
		DEBUG15(printk("vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports));
		mutex_unlock(&ha->vport_lock);
		return vp_id;
	}

	set_bit(vp_id, ha->vp_idx_map);
	ha->num_vhosts++;
	vha->vp_idx = vp_id;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_add_tail(&vha->list, &ha->vp_list);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	mutex_unlock(&ha->vport_lock);
	return vp_id;
}

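/*
 * qla24xx_deallocate_vp_id
 *	Waits for all outstanding references to the vport to drain, removes
 *	it from the adapter's vp_list, and releases its vp_id.
 *
 * Input:
 *	vha = virtual host block pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 *	Process context; may sleep while vref_count is nonzero.
 */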
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	uint16_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	mutex_lock(&ha->vport_lock);
	/*
	 * Wait for all pending activities to finish before removing vport
	 * from the list.
	 * The lock needs to be held for safe removal from the list (it
	 * ensures no active vp_list traversal while the vport is removed
	 * from the list).
	 */
	spin_lock_irqsave(&ha->vport_slock, flags);
	while (atomic_read(&vha->vref_count)) {
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		msleep(500);

		spin_lock_irqsave(&ha->vport_slock, flags);
	}
	list_del(&vha->list);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	vp_id = vha->vp_idx;
	ha->num_vhosts--;
	clear_bit(vp_id, ha->vp_idx_map);

	mutex_unlock(&ha->vport_lock);
}

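/*
 * qla24xx_find_vhost_by_name
 *	Searches the adapter's vp_list for a vport with a matching WWPN.
 *
 * Input:
 *	ha = adapter block pointer.
 *	port_name = WWPN to match (WWN_SIZE bytes).
 *
 * Return:
 *	Pointer to the matching vport, or NULL if none is found.
 */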
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
	scsi_qla_host_t *vha;
	struct scsi_qla_host *tvha;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Locate matching device in database. */
	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);
			return vha;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
	return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	vha = virtual host block pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	/*
	 * !!! NOTE !!!
	 * If this function is called from contexts other than vport create,
	 * disable, or delete, make sure it is synchronized with the vport
	 * delete thread.
	 */
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		DEBUG15(printk("scsi(%ld): Marking port dead, "
		    "loop_id=0x%04x :%x\n",
		    vha->host_no, fcport->loop_id, fcport->vp_idx));

		qla2x00_mark_device_lost(vha, fcport, 0, 0);
		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
	}
}

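/*
 * qla24xx_disable_vp
 *	Logs out and disables the vport in firmware, marks its devices dead,
 *	and updates the FC transport vport state.
 *
 * Input:
 *	vha = virtual host block pointer.
 *
 * Return:
 *	0 on success, -1 on failure.
 */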
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
	int ret;

	ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_mark_vp_devices_dead(vha);
	atomic_set(&vha->vp_state, VP_FAILED);
	vha->flags.management_server_logged_in = 0;
	if (ret == QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
	} else {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		return -1;
	}
	return 0;
}

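/*
 * qla24xx_enable_vp
 *	Enables the vport: checks that the physical port is up on a fabric,
 *	pushes the vport configuration to firmware, and reports the result
 *	through the FC transport.
 *
 * Input:
 *	vha = virtual host block pointer.
 *
 * Return:
 *	0 on success, 1 on failure.
 */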
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN  ||
		atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
		!(ha->current_topology & ISP_CFG_F)) {
		vha->vp_err_state =  VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	DEBUG15(qla_printk(KERN_INFO, ha,
	    "Virtual port with id: %d - Enabled\n", vha->vp_idx));
	return 0;

enable_failed:
	DEBUG15(qla_printk(KERN_INFO, ha,
	    "Virtual port with id: %d - Disabled\n", vha->vp_idx));
	return 1;
}

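/*
 * qla24xx_configure_vp
 *	Completes vport bring-up: enables RSCN reception (change request #3),
 *	configures the virtual HBA, and marks the vport active.
 *
 * Input:
 *	vha = virtual host block pointer.
 *
 * Return:
 *	None.
 */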
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
	struct fc_vport *fc_vport;
	int ret;

	fc_vport = vha->fc_vport;

	DEBUG15(printk("scsi(%ld): %s: change request #3 for this host.\n",
	    vha->host_no, __func__));
	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
	if (ret != QLA_SUCCESS) {
		DEBUG15(qla_printk(KERN_ERR, vha->hw, "Failed to enable "
		    "receiving of RSCN requests: 0x%x\n", ret));
		return;
	} else {
		/* Corresponds to SCR enabled */
		clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
	}

	vha->flags.online = 1;
	if (qla24xx_configure_vhba(vha))
		return;

	atomic_set(&vha->vp_state, VP_ACTIVE);
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

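/*
 * qla2x00_alert_all_vps
 *	Forwards link-related asynchronous events (LIP, loop up/down,
 *	port updates, RSCNs, etc.) to every virtual port on the adapter.
 *
 * Input:
 *	rsp = response queue pointer.
 *	mb = mailbox registers for the asynchronous event.
 *
 * Return:
 *	None.
 */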
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha = rsp->hw;
	int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vha, &ha->vp_list, list) {
		if (vha->vp_idx) {
			atomic_inc(&vha->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_LOOP_UP:
			case MBA_LOOP_DOWN:
			case MBA_LIP_RESET:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				DEBUG15(printk("scsi(%ld)%s: Async_event for"
				" VP[%d], mb = 0x%x, vha=%p\n",
				vha->host_no, __func__, i, *mb, vha));
				qla2x00_async_event(vha, rsp, mb);
				break;
			}

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vha->vref_count);
		}
		i++;
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

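/*
 * qla2x00_vp_abort_isp
 *	Handles ISP abort for a vport. The physical port performs the actual
 *	recovery; the vport is treated as a loop down and then re-enabled.
 *
 * Input:
 *	vha = virtual host block pointer.
 *
 * Return:
 *	Return value of qla24xx_enable_vp().
 */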
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	/*
	 * Physical port will do most of the abort and recovery work. We can
	 * just treat it as a loop down
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

	/*
	 * To exclusively reset the vport, we need to log it out first.
	 * Note: this control_vp can fail if an ISP reset has already been
	 * issued; this is expected, as the vp would already be logged out
	 * due to the ISP reset.
	 */
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	DEBUG15(printk("scsi(%ld): Scheduling enable of Vport %d...\n",
	    vha->host_no, vha->vp_idx));
	return qla24xx_enable_vp(vha);
}

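/*
 * qla2x00_do_dpc_vp
 *	Per-vport DPC worker: drains queued work, completes port
 *	configuration once the vp_id is acquired, and services the fcport
 *	update, relogin, reset marker and loop resync flags.
 *
 * Input:
 *	vha = virtual host block pointer.
 *
 * Return:
 *	0.
 */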
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
	qla2x00_do_work(vha);

	if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
		/* VP acquired; complete port configuration. */
		qla24xx_configure_vp(vha);
		return 0;
	}

	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
		qla2x00_update_fcports(vha);
		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
	}

	if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
		!test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
		atomic_read(&vha->loop_state) != LOOP_DOWN) {

		DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
						vha->host_no));
		qla2x00_relogin(vha);

		DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
							vha->host_no));
	}

	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
		}
	}

	return 0;
}

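/*
 * qla2x00_do_dpc_all_vps
 *	Runs the DPC worker for every vport on the adapter. A no-op when
 *	called on a vport or on a non-fabric topology.
 *
 * Input:
 *	vha = physical host block pointer.
 *
 * Return:
 *	None.
 */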
void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	unsigned long flags = 0;

	if (vha->vp_idx)
		return;
	if (list_empty(&ha->vp_list))
		return;

	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

	if (!(ha->current_topology & ISP_CFG_F))
		return;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		if (vp->vp_idx) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			ret = qla2x00_do_dpc_vp(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

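/*
 * qla24xx_vport_create_req_sanity_check
 *	Validates a vport create request: initiator role, NPIV support in
 *	firmware and fabric, WWPN uniqueness, and a free vport slot.
 *
 * Input:
 *	fc_vport = FC transport vport request.
 *
 * Return:
 *	0 on success; a VPCERR_* code on failure.
 */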
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	uint8_t port_name[WWN_SIZE];

	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
		return VPCERR_UNSUPPORTED;

	/* Check that the F/W and H/W support NPIV */
	if (!ha->flags.npiv_supported)
		return VPCERR_UNSUPPORTED;

	/* Check whether an NPIV-capable switch is present */
	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
		return VPCERR_NO_FABRIC_SUPP;

	/* Check for a unique WWPN */
	u64_to_wwn(fc_vport->port_name, port_name);
	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
		return VPCERR_BAD_WWN;
	vha = qla24xx_find_vhost_by_name(ha, port_name);
	if (vha)
		return VPCERR_BAD_WWN;

	/* Check against the max supported number of NPIV vports */
	if (ha->num_vhosts > ha->max_npiv_vports) {
		DEBUG15(printk("scsi(%ld): num_vhosts %u is bigger than "
		    "max_npiv_vports %u.\n", base_vha->host_no,
		    ha->num_vhosts, ha->max_npiv_vports));
		return VPCERR_UNSUPPORTED;
	}
	return 0;
}

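/*
 * qla24xx_create_vhost
 *	Creates and initializes a virtual host for the vport: allocates the
 *	SCSI host, assigns a vp_id, and sets up the timer and host
 *	parameters.
 *
 * Input:
 *	fc_vport = FC transport vport request.
 *
 * Return:
 *	Pointer to the new virtual host, or NULL on failure.
 */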
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	struct scsi_host_template *sht = &qla2xxx_driver_template;
	struct Scsi_Host *host;

	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
		DEBUG(printk("qla2xxx: scsi_host_alloc() failed for vport\n"));
		return NULL;
	}

	host = vha->host;
	fc_vport->dd_data = vha;
	/* New host info */
	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	if (vha->vp_idx > ha->max_npiv_vports) {
		DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n",
			vha->host_no));
		goto create_vhost_failed;
	}
	vha->mgmt_svr_loop_id = 10 + vha->vp_idx;

	vha->dpc_flags = 0L;

	/*
	 * Prevent a parent's RSCN from being processed for the vport
	 * before its SCR (state change registration) is complete.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);

	vha->req = base_vha->req;
	host->can_queue = base_vha->req->length + 128;
	host->this_id = 255;
	host->cmd_per_lun = 3;
	if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = ql2xmaxlun;
	host->unique_id = host->host_no;
	host->max_id = MAX_TARGETS_2200;
	host->transportt = qla2xxx_transport_vport_template;

	DEBUG15(printk("DEBUG: detect vport hba %ld at address = %p\n",
	    vha->host_no, vha));

	vha->flags.init_done = 1;

	mutex_lock(&ha->vport_lock);
	set_bit(vha->vp_idx, ha->vp_idx_map);
	ha->cur_vport_count++;
	mutex_unlock(&ha->vport_lock);

	return vha;

create_vhost_failed:
	return NULL;
}

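/*
 * qla25xx_free_req_que
 *	Frees a request queue's DMA ring and releases its queue id.
 *
 * Input:
 *	vha = host block pointer.
 *	req = request queue to free.
 *
 * Return:
 *	None.
 */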
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = req->id;

	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
		sizeof(request_t), req->ring, req->dma);
	req->ring = NULL;
	req->dma = 0;
	if (que_id) {
		ha->req_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(req);
	req = NULL;
}

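/*
 * qla25xx_free_rsp_que
 *	Releases a response queue's MSI-X vector, frees its DMA ring, and
 *	releases its queue id.
 *
 * Input:
 *	vha = host block pointer.
 *	rsp = response queue to free.
 *
 * Return:
 *	None.
 */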
static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = rsp->id;

	if (rsp->msix && rsp->msix->have_irq) {
		free_irq(rsp->msix->vector, rsp);
		rsp->msix->have_irq = 0;
		rsp->msix->rsp = NULL;
	}
	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
		sizeof(response_t), rsp->ring, rsp->dma);
	rsp->ring = NULL;
	rsp->dma = 0;
	if (que_id) {
		ha->rsp_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(rsp);
	rsp = NULL;
}

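/*
 * qla25xx_delete_req_que
 *	Re-initializes a request queue with BIT_0 set in its options, then
 *	frees it on success.
 *
 * Input:
 *	vha = host block pointer.
 *	req = request queue to delete.
 *
 * Return:
 *	QLA_SUCCESS if the queue was shut down and freed; an error status
 *	otherwise.
 */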
int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int ret = -1;

	if (req) {
		req->options |= BIT_0;
		ret = qla25xx_init_req_que(vha, req);
	}
	if (ret == QLA_SUCCESS)
		qla25xx_free_req_que(vha, req);

	return ret;
}

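/*
 * qla25xx_delete_rsp_que
 *	Re-initializes a response queue with BIT_0 set in its options, then
 *	frees it on success.
 *
 * Input:
 *	vha = host block pointer.
 *	rsp = response queue to delete.
 *
 * Return:
 *	QLA_SUCCESS if the queue was shut down and freed; an error status
 *	otherwise.
 */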
static int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int ret = -1;

	if (rsp) {
		rsp->options |= BIT_0;
		ret = qla25xx_init_rsp_que(vha, rsp);
	}
	if (ret == QLA_SUCCESS)
		qla25xx_free_rsp_que(vha, rsp);

	return ret;
}

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;

	/* Delete request queues */
	for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
		req = ha->req_q_map[cnt];
		if (req) {
			ret = qla25xx_delete_req_que(vha, req);
			if (ret != QLA_SUCCESS) {
				qla_printk(KERN_WARNING, ha,
				"Couldn't delete req que %d\n",
				req->id);
				return ret;
			}
		}
	}

	/* Delete response queues */
	for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
		rsp = ha->rsp_q_map[cnt];
		if (rsp) {
			ret = qla25xx_delete_rsp_que(vha, rsp);
			if (ret != QLA_SUCCESS) {
				qla_printk(KERN_WARNING, ha,
				"Couldn't delete rsp que %d\n",
				rsp->id);
				return ret;
			}
		}
	}
	return ret;
}

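/*
 * qla25xx_create_req_que
 *	Creates an additional request queue: allocates the DMA ring, claims
 *	a queue id, optionally binds a response queue, and initializes the
 *	queue in firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	options = queue option bits.
 *	vp_idx = index of the vport the queue serves.
 *	rid = queue resource id.
 *	rsp_que = index of the response queue to pair with, or negative
 *	    for none.
 *	qos = QoS value for the queue.
 *
 * Return:
 *	The new queue id on success, 0 on failure.
 */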
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
	uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
{
	int ret = 0;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t __iomem *reg;
	uint32_t cnt;

	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (req == NULL) {
		qla_printk(KERN_WARNING, ha, "could not allocate memory "
			"for request que\n");
		goto failed;
	}

	req->length = REQUEST_ENTRY_CNT_24XX;
	req->ring = dma_alloc_coherent(&ha->pdev->dev,
			(req->length + 1) * sizeof(request_t),
			&req->dma, GFP_KERNEL);
	if (req->ring == NULL) {
		qla_printk(KERN_WARNING, ha,
		"Memory Allocation failed - request_ring\n");
		goto que_failed;
	}

	mutex_lock(&ha->vport_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
	if (que_id >= ha->max_req_queues) {
		mutex_unlock(&ha->vport_lock);
		qla_printk(KERN_INFO, ha, "No resources to create "
			 "additional request queue\n");
		goto que_failed;
	}
	set_bit(que_id, ha->req_qid_map);
	ha->req_q_map[que_id] = req;
	req->rid = rid;
	req->vp_idx = vp_idx;
	req->qos = qos;

	if (rsp_que < 0)
		req->rsp = NULL;
	else
		req->rsp = ha->rsp_q_map[rsp_que];
	/* Use alternate PCI bus number */
	if (MSB(req->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(req->rid))
		options |= BIT_5;
	req->options = options;

	for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
		req->outstanding_cmds[cnt] = NULL;
	req->current_outstanding_cmd = 1;

	req->ring_ptr = req->ring;
	req->ring_index = 0;
	req->cnt = req->length;
	req->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
	mutex_unlock(&ha->vport_lock);

	ret = qla25xx_init_req_que(base_vha, req);
	if (ret != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
		goto que_failed;
	}

	return req->id;

que_failed:
	qla25xx_free_req_que(base_vha, req);
failed:
	return 0;
}

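/*
 * qla_do_work
 *	Workqueue handler: processes a response queue under the hardware
 *	lock.
 *
 * Input:
 *	work = work_struct embedded in the rsp_que.
 *
 * Return:
 *	None.
 */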
static void qla_do_work(struct work_struct *work)
{
	unsigned long flags;
	struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;

	spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, rsp);
	spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
}

/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
	uint8_t vp_idx, uint16_t rid, int req)
{
	int ret = 0;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t __iomem *reg;

	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (rsp == NULL) {
		qla_printk(KERN_WARNING, ha, "could not allocate memory for"
				" response que\n");
		goto failed;
	}

	rsp->length = RESPONSE_ENTRY_CNT_MQ;
	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
			(rsp->length + 1) * sizeof(response_t),
			&rsp->dma, GFP_KERNEL);
	if (rsp->ring == NULL) {
		qla_printk(KERN_WARNING, ha,
		"Memory Allocation failed - response_ring\n");
		goto que_failed;
	}

	mutex_lock(&ha->vport_lock);
	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
	if (que_id >= ha->max_rsp_queues) {
		mutex_unlock(&ha->vport_lock);
		qla_printk(KERN_INFO, ha, "No resources to create "
			 "additional response queue\n");
		goto que_failed;
	}
	set_bit(que_id, ha->rsp_qid_map);

	if (ha->flags.msix_enabled)
		rsp->msix = &ha->msix_entries[que_id + 1];
	else
		qla_printk(KERN_WARNING, ha, "msix not enabled\n");

	ha->rsp_q_map[que_id] = rsp;
	rsp->rid = rid;
	rsp->vp_idx = vp_idx;
	rsp->hw = ha;
	/* Use alternate PCI bus number */
	if (MSB(rsp->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(rsp->rid))
		options |= BIT_5;
	/* Enable MSIX handshake mode for adapters that are not NACK capable */
	if (!IS_MSIX_NACK_CAPABLE(ha))
		options |= BIT_6;

	rsp->options = options;
	rsp->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
	mutex_unlock(&ha->vport_lock);

	ret = qla25xx_request_irq(rsp);
	if (ret)
		goto que_failed;

	ret = qla25xx_init_rsp_que(base_vha, rsp);
	if (ret != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
		goto que_failed;
	}
	if (req >= 0)
		rsp->req = ha->req_q_map[req];
	else
		rsp->req = NULL;

	qla2x00_init_response_q_entries(rsp);
	if (rsp->hw->wq)
		INIT_WORK(&rsp->q_work, qla_do_work);
	return rsp->id;

que_failed:
	qla25xx_free_rsp_que(base_vha, rsp);
failed:
	return 0;
}