/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2012 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"
#include "qla_target.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

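/*
 * qla2x00_vp_stop_timer
 *	Stop the per-vport timer.  Only virtual ports (vp_idx != 0) with an
 *	active timer are affected; the base port's timer is left alone.
 */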
void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
	if (vha->vp_idx && vha->timer_active) {
		del_timer_sync(&vha->timer);
		vha->timer_active = 0;
	}
}

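/*
 * qla24xx_allocate_vp_id
 *	Reserve the first free vp_id from the HBA-wide bitmap, add the vport
 *	to ha->vp_list and update the target-mode VP map.  Returns the
 *	allocated vp_id; a value greater than ha->max_npiv_vports means the
 *	bitmap was exhausted and nothing was allocated.
 */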
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Find an empty slot and assign a vp_id */
	mutex_lock(&ha->vport_lock);
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa000,
		    "vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports);
		mutex_unlock(&ha->vport_lock);
		return vp_id;
	}

	set_bit(vp_id, ha->vp_idx_map);
	ha->num_vhosts++;
	vha->vp_idx = vp_id;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_add_tail(&vha->list, &ha->vp_list);

	qlt_update_vp_map(vha, SET_VP_IDX);

	spin_unlock_irqrestore(&ha->vport_slock, flags);

	mutex_unlock(&ha->vport_lock);
	return vp_id;
}

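/*
 * qla24xx_deallocate_vp_id
 *	Reverse of qla24xx_allocate_vp_id(): wait for outstanding references
 *	(vref_count) to drain, unlink the vport from ha->vp_list and release
 *	its bit in the vp_id bitmap.
 */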
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	uint16_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	mutex_lock(&ha->vport_lock);
	/*
	 * Wait for all pending activities to finish before removing vport
	 * from the list.
	 * Lock needs to be held for safe removal from the list (it
	 * ensures no active vp_list traversal while the vport is removed
	 * from the queue).
	 */
	spin_lock_irqsave(&ha->vport_slock, flags);
	while (atomic_read(&vha->vref_count)) {
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		msleep(500);

		spin_lock_irqsave(&ha->vport_slock, flags);
	}
	list_del(&vha->list);
	qlt_update_vp_map(vha, RESET_VP_IDX);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	vp_id = vha->vp_idx;
	ha->num_vhosts--;
	clear_bit(vp_id, ha->vp_idx_map);

	mutex_unlock(&ha->vport_lock);
}

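/*
 * qla24xx_find_vhost_by_name
 *	Return the vport on ha->vp_list whose WWPN matches port_name, or
 *	NULL if none exists.
 */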
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
	scsi_qla_host_t *vha;
	struct scsi_qla_host *tvha;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Locate matching device in database. */
	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);
			return vha;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
	return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	vha = virtual adapter block pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	/*
	 * !!! NOTE !!!
	 * If this function is called from contexts other than vp create,
	 * disable or delete, make sure it is synchronized with the
	 * delete thread.
	 */
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		ql_dbg(ql_dbg_vport, vha, 0xa001,
		    "Marking port dead, loop_id=0x%04x : %x.\n",
		    fcport->loop_id, fcport->vha->vp_idx);

		qla2x00_mark_device_lost(vha, fcport, 0, 0);
		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
	}
}

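/*
 * qla24xx_disable_vp
 *	Ask the firmware to log out all vport sessions, take the vport's
 *	loop down, and mark its devices dead.  Returns 0 on success and -1
 *	if the VP control command failed.
 */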
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
	int ret;

	ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	/* Remove port id from vp target map */
	qlt_update_vp_map(vha, RESET_AL_PA);

	qla2x00_mark_vp_devices_dead(vha);
	atomic_set(&vha->vp_state, VP_FAILED);
	vha->flags.management_server_logged_in = 0;
	if (ret == QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
	} else {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		return -1;
	}
	return 0;
}

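/*
 * qla24xx_enable_vp
 *	Push the vport configuration to the firmware, provided the physical
 *	port is up and connected to a fabric.  Returns 0 on success, 1 on
 *	failure.
 */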
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
	    !(ha->current_topology & ISP_CFG_F)) {
		vha->vp_err_state = VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801a,
	    "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
	return 0;

enable_failed:
	ql_dbg(ql_dbg_taskm, vha, 0x801b,
	    "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
	return 1;
}

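/*
 * qla24xx_configure_vp
 *	Finish vport bring-up after a vp_id has been acquired: issue change
 *	request #3 so the firmware forwards RSCNs to this vport, then
 *	configure the virtual HBA and mark the vport active.
 */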
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
	struct fc_vport *fc_vport;
	int ret;

	fc_vport = vha->fc_vport;

	ql_dbg(ql_dbg_vport, vha, 0xa002,
	    "%s: change request #3.\n", __func__);
	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
	if (ret != QLA_SUCCESS) {
		ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
		    "receiving of RSCN requests: 0x%x.\n", ret);
		return;
	} else {
		/* Corresponds to SCR enabled */
		clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
	}

	vha->flags.online = 1;
	if (qla24xx_configure_vhba(vha))
		return;

	atomic_set(&vha->vp_state, VP_ACTIVE);
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

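/*
 * qla2x00_alert_all_vps
 *	Fan a link-related asynchronous event (mailbox values in mb[]) out to
 *	every virtual port.  Each vport is pinned via vref_count so it cannot
 *	be deleted while the event is dispatched with vport_slock dropped.
 */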
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha = rsp->hw;
	int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vha, &ha->vp_list, list) {
		if (vha->vp_idx) {
			atomic_inc(&vha->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_LOOP_UP:
			case MBA_LOOP_DOWN:
			case MBA_LIP_RESET:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				ql_dbg(ql_dbg_async, vha, 0x5024,
				    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
				    i, *mb, vha);
				qla2x00_async_event(vha, rsp, mb);
				break;
			}

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vha->vref_count);
		}
		i++;
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

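/*
 * qla2x00_vp_abort_isp
 *	Vport-side portion of an ISP abort: the physical port does the real
 *	recovery work, so the vport is treated as a loop-down, logged out if
 *	no ISP reset is in progress, and then re-enabled.
 */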
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	/*
	 * Physical port will do most of the abort and recovery work. We can
	 * just treat it as a loop down.
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

	/*
	 * To exclusively reset vport, we need to log it out first.  Note: this
	 * control_vp can fail if ISP reset is already issued, this is
	 * expected, as the vp would be already logged out due to ISP reset.
	 */
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	ql_dbg(ql_dbg_taskm, vha, 0x801d,
	    "Scheduling enable of Vport %d.\n", vha->vp_idx);
	return qla24xx_enable_vp(vha);
}

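/*
 * qla2x00_do_dpc_vp
 *	Per-vport DPC worker: drain queued work, then service pending
 *	dpc_flags bits (initial VP configuration, fcport updates, relogins,
 *	loop resync).
 */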
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
	    "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);

	qla2x00_do_work(vha);

	if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
		/* VP acquired. complete port configuration */
		ql_dbg(ql_dbg_dpc, vha, 0x4014,
		    "Configure VP scheduled.\n");
		qla24xx_configure_vp(vha);
		ql_dbg(ql_dbg_dpc, vha, 0x4015,
		    "Configure VP end.\n");
		return 0;
	}

	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
		ql_dbg(ql_dbg_dpc, vha, 0x4016,
		    "FCPort update scheduled.\n");
		qla2x00_update_fcports(vha);
		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
		ql_dbg(ql_dbg_dpc, vha, 0x4017,
		    "FCPort update end.\n");
	}

	if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
	    atomic_read(&vha->loop_state) != LOOP_DOWN) {

		ql_dbg(ql_dbg_dpc, vha, 0x4018,
		    "Relogin needed scheduled.\n");
		qla2x00_relogin(vha);
		ql_dbg(ql_dbg_dpc, vha, 0x4019,
		    "Relogin needed end.\n");
	}

	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
			ql_dbg(ql_dbg_dpc, vha, 0x401a,
			    "Loop resync scheduled.\n");
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
			ql_dbg(ql_dbg_dpc, vha, 0x401b,
			    "Loop resync end.\n");
		}
	}

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
	    "Exiting %s.\n", __func__);
	return 0;
}

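/*
 * qla2x00_do_dpc_all_vps
 *	Called from the base port's DPC thread; runs qla2x00_do_dpc_vp() for
 *	every vport on ha->vp_list, pinning each one with vref_count while
 *	vport_slock is dropped around the call.
 */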
void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	unsigned long flags = 0;

	if (vha->vp_idx)
		return;
	if (list_empty(&ha->vp_list))
		return;

	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

	if (!(ha->current_topology & ISP_CFG_F))
		return;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		if (vp->vp_idx) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			ret = qla2x00_do_dpc_vp(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

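/*
 * qla24xx_vport_create_req_sanity_check
 *	Validate an fc_vport create request before committing resources:
 *	initiator role only, NPIV support in firmware and fabric, a WWPN
 *	unique on this HBA, and a free vport slot.
 */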
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	uint8_t port_name[WWN_SIZE];

	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
		return VPCERR_UNSUPPORTED;

	/* Check whether the F/W and H/W support NPIV */
	if (!ha->flags.npiv_supported)
		return VPCERR_UNSUPPORTED;

	/* Check whether an NPIV-capable switch is present */
	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
		return VPCERR_NO_FABRIC_SUPP;

	/* Check for a unique WWPN */
	u64_to_wwn(fc_vport->port_name, port_name);
	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
		return VPCERR_BAD_WWN;
	vha = qla24xx_find_vhost_by_name(ha, port_name);
	if (vha)
		return VPCERR_BAD_WWN;

	/* Check the max NPIV vport limit */
	if (ha->num_vhosts > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, base_vha, 0xa004,
		    "num_vhosts %u is bigger "
		    "than max_npiv_vports %u.\n",
		    ha->num_vhosts, ha->max_npiv_vports);
		return VPCERR_UNSUPPORTED;
	}
	return 0;
}

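/*
 * qla24xx_create_vhost
 *	Allocate and initialize a scsi_qla_host for a new NPIV vport: create
 *	the SCSI host, reserve a vp_id, inherit the base port's request
 *	queue and start the per-vport timer.  Returns the new vha, or NULL
 *	on failure.
 */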
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	struct scsi_host_template *sht = &qla2xxx_driver_template;
	struct Scsi_Host *host;

	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
		ql_log(ql_log_warn, vha, 0xa005,
		    "scsi_host_alloc() failed for vport.\n");
		return NULL;
	}

	host = vha->host;
	fc_vport->dd_data = vha;
	/* New host info */
	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	if (vha->vp_idx > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa006,
		    "Couldn't allocate vp_id.\n");
		goto create_vhost_failed;
	}
	vha->mgmt_svr_loop_id = 10 + vha->vp_idx;

	vha->dpc_flags = 0L;

	/*
	 * To fix the issue of processing a parent's RSCN for the vport before
	 * its SCR is complete.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);

	vha->req = base_vha->req;
	host->can_queue = base_vha->req->length + 128;
	host->cmd_per_lun = 3;
	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = ql2xmaxlun;
	host->unique_id = host->host_no;
	host->max_id = ha->max_fibre_devices;
	host->transportt = qla2xxx_transport_vport_template;

	ql_dbg(ql_dbg_vport, vha, 0xa007,
	    "Detect vport hba %ld at address = %p.\n",
	    vha->host_no, vha);

	vha->flags.init_done = 1;

	mutex_lock(&ha->vport_lock);
	set_bit(vha->vp_idx, ha->vp_idx_map);
	ha->cur_vport_count++;
	mutex_unlock(&ha->vport_lock);

	return vha;

create_vhost_failed:
	return NULL;
}

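/*
 * qla25xx_free_req_que
 *	Free a request queue's DMA ring and, for any queue other than the
 *	default queue 0, release its id back to the qid bitmap.
 */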
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = req->id;

	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
		sizeof(request_t), req->ring, req->dma);
	req->ring = NULL;
	req->dma = 0;
	if (que_id) {
		ha->req_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(req);
	req = NULL;
}

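/*
 * qla25xx_free_rsp_que
 *	Tear down a response queue: release its MSI-X vector if one was
 *	claimed, free the DMA ring and return the id to the qid bitmap.
 */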
static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = rsp->id;

	if (rsp->msix && rsp->msix->have_irq) {
		free_irq(rsp->msix->vector, rsp);
		rsp->msix->have_irq = 0;
		rsp->msix->rsp = NULL;
	}
	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
		sizeof(response_t), rsp->ring, rsp->dma);
	rsp->ring = NULL;
	rsp->dma = 0;
	if (que_id) {
		ha->rsp_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(rsp);
	rsp = NULL;
}

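/*
 * qla25xx_delete_req_que
 *	Re-initialize the request queue with BIT_0 set in its options, which
 *	the firmware appears to interpret as a delete request, then free the
 *	queue on success.
 */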
int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int ret = -1;

	if (req) {
		req->options |= BIT_0;
		ret = qla25xx_init_req_que(vha, req);
	}
	if (ret == QLA_SUCCESS)
		qla25xx_free_req_que(vha, req);

	return ret;
}

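/* Response-queue counterpart of qla25xx_delete_req_que() above. */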
static int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int ret = -1;

	if (rsp) {
		rsp->options |= BIT_0;
		ret = qla25xx_init_rsp_que(vha, rsp);
	}
	if (ret == QLA_SUCCESS)
		qla25xx_free_rsp_que(vha, rsp);

	return ret;
}

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;

	/* Delete request queues */
	for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
		req = ha->req_q_map[cnt];
		if (req) {
			ret = qla25xx_delete_req_que(vha, req);
			if (ret != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x00ea,
				    "Couldn't delete req que %d.\n",
				    req->id);
				return ret;
			}
		}
	}

	/* Delete response queues */
	for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
		rsp = ha->rsp_q_map[cnt];
		if (rsp) {
			ret = qla25xx_delete_rsp_que(vha, rsp);
			if (ret != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x00eb,
				    "Couldn't delete rsp que %d.\n",
				    rsp->id);
				return ret;
			}
		}
	}
	return ret;
}

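/*
 * qla25xx_create_req_que
 *	Allocate an additional request queue for multiqueue operation: ring
 *	memory, a qid from the bitmap, and firmware initialization.  Returns
 *	the new queue id, or 0 on failure (0 is the always-present default
 *	queue, so it doubles as an error value).
 */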
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
	uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
{
	int ret = 0;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t __iomem *reg;
	uint32_t cnt;

	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (req == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00d9,
		    "Failed to allocate memory for request queue.\n");
		goto failed;
	}

	req->length = REQUEST_ENTRY_CNT_24XX;
	req->ring = dma_alloc_coherent(&ha->pdev->dev,
			(req->length + 1) * sizeof(request_t),
			&req->dma, GFP_KERNEL);
	if (req->ring == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00da,
		    "Failed to allocate memory for request_ring.\n");
		goto que_failed;
	}

	mutex_lock(&ha->vport_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
	if (que_id >= ha->max_req_queues) {
		mutex_unlock(&ha->vport_lock);
		ql_log(ql_log_warn, base_vha, 0x00db,
		    "No resources to create additional request queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->req_qid_map);
	ha->req_q_map[que_id] = req;
	req->rid = rid;
	req->vp_idx = vp_idx;
	req->qos = qos;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	ql_dbg(ql_dbg_init, base_vha, 0x00dc,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	if (rsp_que < 0)
		req->rsp = NULL;
	else
		req->rsp = ha->rsp_q_map[rsp_que];
	/* Use alternate PCI bus number */
	if (MSB(req->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(req->rid))
		options |= BIT_5;
	req->options = options;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
	    "options=0x%x.\n", req->options);
	ql_dbg(ql_dbg_init, base_vha, 0x00dd,
	    "options=0x%x.\n", req->options);
	for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
		req->outstanding_cmds[cnt] = NULL;
	req->current_outstanding_cmd = 1;

	req->ring_ptr = req->ring;
	req->ring_index = 0;
	req->cnt = req->length;
	req->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
	mutex_unlock(&ha->vport_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index,
	    req->cnt, req->id, req->max_q_depth);
	ql_dbg(ql_dbg_init, base_vha, 0x00de,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index, req->cnt,
	    req->id, req->max_q_depth);

	ret = qla25xx_init_req_que(base_vha, req);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_fatal, base_vha, 0x00df,
		    "%s failed.\n", __func__);
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
		goto que_failed;
	}

	return req->id;

que_failed:
	qla25xx_free_req_que(base_vha, req);
failed:
	return 0;
}

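/* Workqueue handler: process a response queue outside hard-IRQ context. */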
static void qla_do_work(struct work_struct *work)
{
	unsigned long flags;
	struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;

	spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, rsp);
	spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
}

/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
	uint8_t vp_idx, uint16_t rid, int req)
{
	int ret = 0;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t __iomem *reg;

	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (rsp == NULL) {
		ql_log(ql_log_warn, base_vha, 0x0066,
		    "Failed to allocate memory for response queue.\n");
		goto failed;
	}

	rsp->length = RESPONSE_ENTRY_CNT_MQ;
	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
			(rsp->length + 1) * sizeof(response_t),
			&rsp->dma, GFP_KERNEL);
	if (rsp->ring == NULL) {
		ql_log(ql_log_warn, base_vha, 0x00e1,
		    "Failed to allocate memory for response ring.\n");
		goto que_failed;
	}

	mutex_lock(&ha->vport_lock);
	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
	if (que_id >= ha->max_rsp_queues) {
		mutex_unlock(&ha->vport_lock);
		ql_log(ql_log_warn, base_vha, 0x00e2,
		    "No resources to create additional response queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->rsp_qid_map);

	if (ha->flags.msix_enabled)
		rsp->msix = &ha->msix_entries[que_id + 1];
	else
		ql_log(ql_log_warn, base_vha, 0x00e3,
		    "MSI-X not enabled.\n");

	ha->rsp_q_map[que_id] = rsp;
	rsp->rid = rid;
	rsp->vp_idx = vp_idx;
	rsp->hw = ha;
	ql_dbg(ql_dbg_init, base_vha, 0x00e4,
	    "queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
	    que_id, rsp->rid, rsp->vp_idx, rsp->hw);
	/* Use alternate PCI bus number */
	if (MSB(rsp->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(rsp->rid))
		options |= BIT_5;
	/* Enable MSI-X handshake mode for adapters that lack NACK capability */
	if (!IS_MSIX_NACK_CAPABLE(ha))
		options |= BIT_6;

	rsp->options = options;
	rsp->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
	mutex_unlock(&ha->vport_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p.\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);
	ql_dbg(ql_dbg_init, base_vha, 0x00e5,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p.\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);

	ret = qla25xx_request_irq(rsp);
	if (ret)
		goto que_failed;

	ret = qla25xx_init_rsp_que(base_vha, rsp);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_fatal, base_vha, 0x00e7,
		    "%s failed.\n", __func__);
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
		goto que_failed;
	}
	if (req >= 0)
		rsp->req = ha->req_q_map[req];
	else
		rsp->req = NULL;

	qla2x00_init_response_q_entries(rsp);
	if (rsp->hw->wq)
		INIT_WORK(&rsp->q_work, qla_do_work);
	return rsp->id;

que_failed:
	qla25xx_free_rsp_que(base_vha, rsp);
failed:
	return 0;
}