/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/bsg-lib.h>

/* BSG support for ELS/CT pass through */
void
qla2x00_bsg_job_done(void *data, void *ptr, int res)
{
	srb_t *sp = (srb_t *)ptr;
	struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;

	bsg_reply->result = res;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	sp->free(vha, sp);
}

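/*
 * Release a BSG SRB: undo the DMA mappings set up for the job (for
 * SRB_FXIOCB_BCMD only the buffers flagged as DMA-valid were mapped)
 * and free the dummy fcport allocated for host-based commands.
 */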
void
qla2x00_bsg_sp_free(void *data, void *ptr)
{
	srb_t *sp = (srb_t *)ptr;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_request *bsg_request = bsg_job->request;

	struct qla_hw_data *ha = vha->hw;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

	if (sp->type == SRB_FXIOCB_BCMD) {
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	}

	if (sp->type == SRB_CT_CMD ||
	    sp->type == SRB_FXIOCB_BCMD ||
	    sp->type == SRB_ELS_CMD_HST)
		kfree(sp->fcport);
	qla2x00_rel_sp(vha, sp);
}

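/*
 * Validate FCP priority configuration data: an all-ones first word
 * means nothing is stored in flash, the buffer must begin with the
 * "HQOS" signature, and, when @flag is set, at least one entry must
 * be tagged valid.  Returns 1 if the data is usable, 0 otherwise.
 */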
int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
	struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
	int i, ret, num_valid;
	uint8_t *bcode;
	struct qla_fcp_prio_entry *pri_entry;
	uint32_t *bcode_val_ptr, bcode_val;

	ret = 1;
	num_valid = 0;
	bcode = (uint8_t *)pri_cfg;
	bcode_val_ptr = (uint32_t *)pri_cfg;
	bcode_val = (uint32_t)(*bcode_val_ptr);

	if (bcode_val == 0xFFFFFFFF) {
		/* No FCP Priority config data in flash */
		ql_dbg(ql_dbg_user, vha, 0x7051,
		    "No FCP Priority config data.\n");
		return 0;
	}

	if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
			bcode[3] != 'S') {
		/* Invalid FCP priority data header */
		ql_dbg(ql_dbg_user, vha, 0x7052,
		    "Invalid FCP Priority data header. bcode=0x%x.\n",
		    bcode_val);
		return 0;
	}
	if (flag != 1)
		return ret;

	pri_entry = &pri_cfg->entry[0];
	for (i = 0; i < pri_cfg->num_entries; i++) {
		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
			num_valid++;
		pri_entry++;
	}

	if (num_valid == 0) {
		/* No valid FCP priority data entries */
		ql_dbg(ql_dbg_user, vha, 0x7053,
		    "No valid FCP Priority data entries.\n");
		ret = 0;
	} else {
		/* FCP priority data is valid */
		ql_dbg(ql_dbg_user, vha, 0x7054,
		    "Valid FCP priority data. num entries = %d.\n",
		    num_valid);
	}

	return ret;
}

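/*
 * Handle the FCP priority vendor sub-commands: enable or disable the
 * feature, return the current configuration, or accept, validate and
 * apply a new one.
 */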
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}
	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
				~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_reply->result = DID_OK;
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
			FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */

		if (!qla24xx_fcp_prio_cfg_valid(vha,
		    (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer is invalid, the
			 * fcp_prio_cfg is of no use.
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	if (!ret)
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return ret;
}

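/*
 * Pass an ELS frame through to the fabric.  Port-based requests
 * (FC_BSG_RPT_ELS) use the rport's fcport and log it in if needed;
 * host-based requests (FC_BSG_HST_ELS_NOLOGIN) get a dummy fcport
 * built from the destination port id carried in the request.
 */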
static int
qla2x00_process_els(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	srb_t *sp;
	const char *type;
	int req_sg_cnt, rsp_sg_cnt;
	int rval = (DRIVER_ERROR << 16);
	uint16_t nextlid = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_RPT_ELS";
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_HST_ELS_NOLOGIN";
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
		rval = -EIO;
		goto done;
	}

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7001,
		    "ELS passthru not supported for ISP23xx based adapters.\n");
		rval = -EPERM;
		goto done;
	}

	/* Multiple SG's are not supported for ELS requests */
	if (bsg_job->request_payload.sg_cnt > 1 ||
		bsg_job->reply_payload.sg_cnt > 1) {
		ql_dbg(ql_dbg_user, vha, 0x7002,
		    "Multiple SG's are not supported for ELS requests, "
		    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
		    bsg_job->request_payload.sg_cnt,
		    bsg_job->reply_payload.sg_cnt);
		rval = -EPERM;
		goto done;
	}

	/* ELS request for rport */
	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		/* make sure the rport is logged in,
		 * if not perform fabric login
		 */
		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
			ql_dbg(ql_dbg_user, vha, 0x7003,
			    "Failed to login port %06X for ELS passthru.\n",
			    fcport->d_id.b24);
			rval = -EIO;
			goto done;
		}
	} else {
		/* Allocate a dummy fcport structure, since functions
		 * preparing the IOCB and mailbox command retrieves port
		 * specific information from fcport structure. For Host based
		 * ELS commands there will be no fcport structure allocated
		 */
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (!fcport) {
			rval = -ENOMEM;
			goto done;
		}

		/* Initialize all required fields of fcport */
		fcport->vha = vha;
		fcport->d_id.b.al_pa =
			bsg_request->rqst_data.h_els.port_id[0];
		fcport->d_id.b.area =
			bsg_request->rqst_data.h_els.port_id[1];
		fcport->d_id.b.domain =
			bsg_request->rqst_data.h_els.port_id[2];
		fcport->loop_id =
			(fcport->d_id.b.al_pa == 0xFD) ?
			NPH_FABRIC_CONTROLLER : NPH_F_PORT;
	}

	req_sg_cnt =
		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
		(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7008,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sp->type =
		(bsg_request->msgcode == FC_BSG_RPT_ELS ?
		 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
	sp->name =
		(bsg_request->msgcode == FC_BSG_RPT_ELS ?
		 "bsg_els_rpt" : "bsg_els_hst");
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x700a,
	    "bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%-2x%02x%02x.\n", type,
	    bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		qla2x00_rel_sp(vha, sp);
		rval = -EIO;
		goto done_unmap_sg;
	}
	return rval;

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	goto done_free_fcport;

done_free_fcport:
	/* Only the dummy fcport allocated for host-based ELS may be
	 * freed here; an rport's fcport belongs to the FC transport.
	 */
	if (bsg_request->msgcode != FC_BSG_RPT_ELS)
		kfree(fcport);
done:
	return rval;
}

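/*
 * A CT command IOCB carries two data segment descriptors; each
 * continuation IOCB carries five more, so round up accordingly.
 */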
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return iocbs;
}

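/*
 * Pass a CT command through to the fabric.  The destination (SNS or
 * management server) is selected by the high byte of preamble word 1
 * and addressed through a dummy fcport.
 */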
static int
qla2x00_process_ct(struct bsg_job *bsg_job)
{
	srb_t *sp;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DRIVER_ERROR << 16);
	int req_sg_cnt, rsp_sg_cnt;
	uint16_t loop_id;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_CT";

	req_sg_cnt =
		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x700f,
		    "dma_map_sg return %d for request\n", req_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7010,
		    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7011,
		    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7012,
		    "Host is not online.\n");
		rval = -EIO;
		goto done_unmap_sg;
	}

	loop_id =
		(bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
			>> 24;
	switch (loop_id) {
	case 0xFC:
		loop_id = cpu_to_le16(NPH_SNS);
		break;
	case 0xFA:
		loop_id = vha->mgmt_svr_loop_id;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7013,
		    "Unknown loop id: %x.\n", loop_id);
		rval = -EINVAL;
		goto done_unmap_sg;
	}

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieves port specific information
	 * from fcport structure. For Host based ELS commands there will be
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x7014,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
	fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
	fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
	fcport->loop_id = loop_id;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x7015,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	sp->type = SRB_CT_CMD;
	sp->name = "bsg_ct";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x7016,
	    "bsg rqst type: %s ct type: %x - "
	    "loop-id=%x portid=%02x%02x%02x.\n", type,
	    (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7017,
		    "qla2x00_start_sp failed=%d.\n", rval);
		qla2x00_rel_sp(vha, sp);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	kfree(fcport);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
	return rval;
}

/* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
			    int wait, int wait2)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_reset_internal;

	memset(new_config, 0, sizeof(new_config));
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK ||
	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_EXTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

		ha->notify_dcbx_comp = wait;
		ha->notify_lb_portup_comp = wait2;

		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
			(DCBX_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "DCBX completion not received.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "DCBX completion received.\n");

		if (wait2 &&
		    !wait_for_completion_timeout(&ha->lb_portup_comp,
		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x70c5,
			    "Port up completion not received.\n");
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x70c6,
			    "Port up completion received.\n");

		ha->notify_dcbx_comp = 0;
		ha->notify_lb_portup_comp = 0;
	}
done_reset_internal:
	return rval;
}

/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
	uint16_t *new_config, uint16_t mode)
{
	int ret = 0;
	int rval = 0;
	unsigned long rem_tmo = 0, current_tmo = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_set_internal;

	if (mode == INTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	else if (mode == EXTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
	ql_dbg(ql_dbg_user, vha, 0x70be,
	     "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for DCBX complete event */
	current_tmo = DCBX_COMP_TIMEOUT * HZ;
	while (1) {
		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
		    current_tmo);
		if (!ha->idc_extend_tmo || rem_tmo) {
			ha->idc_extend_tmo = 0;
			break;
		}
		current_tmo = ha->idc_extend_tmo * HZ;
		ha->idc_extend_tmo = 0;
	}

	if (!rem_tmo) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "DCBX completion not received.\n");
		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
		/*
		 * If the reset of the loopback mode doesn't work take a FCoE
		 * dump and reset the chip.
		 */
		if (ret) {
			ha->isp_ops->fw_dump(vha, 0);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		rval = -EINVAL;
	} else {
		if (ha->flags.idc_compl_status) {
			ql_dbg(ql_dbg_user, vha, 0x70c3,
			    "Bad status in IDC Completion AEN\n");
			rval = -EINVAL;
			ha->flags.idc_compl_status = 0;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7023,
			    "DCBX completion received.\n");
	}

	ha->notify_dcbx_comp = 0;
	ha->idc_extend_tmo = 0;

done_set_internal:
	return rval;
}

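/*
 * Run an echo or loopback diagnostic.  On a fabric-attached (or
 * capable FCoE) port an external loopback request is sent as an ECHO;
 * otherwise the port is reconfigured for the requested loopback mode,
 * the test is run, and the original port config is restored.
 */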
static int
qla2x00_process_loopback(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint16_t config[4], new_config[4];
	uint8_t *fw_sts_ptr;
	uint8_t *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
		return -EIO;
	}

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
		DMA_TO_DEVICE);

	if (!elreq.req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701a,
		    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
		return -ENOMEM;
	}

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
		DMA_FROM_DEVICE);

	if (!elreq.rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701b,
		    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
		(elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x701c,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
		&req_data_dma, GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x701d,
		    "dma alloc failed for req_data.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
		&rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		ql_log(ql_log_warn, vha, 0x7004,
		    "dma alloc failed for rsp_data.\n");
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	elreq.iteration_count =
	    bsg_request->rqst_data.h_vendor.vendor_cmd[2];

	if (atomic_read(&vha->loop_state) == LOOP_READY &&
	    (ha->current_topology == ISP_CFG_F ||
	    ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
	    le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
	    && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
		elreq.options == EXTERNAL_LOOPBACK) {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		ql_dbg(ql_dbg_user, vha, 0x701e,
		    "BSG request type: %s.\n", type);
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	} else {
		if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
			memset(config, 0, sizeof(config));
			memset(new_config, 0, sizeof(new_config));

			if (qla81xx_get_port_config(vha, config)) {
				ql_log(ql_log_warn, vha, 0x701f,
				    "Get port config failed.\n");
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
				ql_dbg(ql_dbg_user, vha, 0x70c4,
				    "Loopback operation already in "
				    "progress.\n");
				rval = -EAGAIN;
				goto done_free_dma_rsp;
			}

			ql_dbg(ql_dbg_user, vha, 0x70c0,
			    "elreq.options=%04x\n", elreq.options);

			if (elreq.options == EXTERNAL_LOOPBACK) {
				if (IS_QLA8031(ha) || IS_QLA8044(ha))
					rval = qla81xx_set_loopback_mode(vha,
					    config, new_config, elreq.options);
				else
					rval = qla81xx_reset_loopback_mode(vha,
					    config, 1, 0);
			} else {
				rval = qla81xx_set_loopback_mode(vha, config,
				    new_config, elreq.options);
			}

			if (rval) {
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x7028,
			    "BSG request type: %s.\n", type);

			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);

			if (response[0] == MBS_COMMAND_ERROR &&
					response[1] == MBS_LB_RESET) {
				ql_log(ql_log_warn, vha, 0x7029,
				    "MBX command error, Aborting ISP.\n");
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
				qla2x00_wait_for_chip_reset(vha);
				/* Also reset the MPI */
				if (IS_QLA81XX(ha)) {
					if (qla81xx_restart_mpi_firmware(vha) !=
					    QLA_SUCCESS) {
						ql_log(ql_log_warn, vha, 0x702a,
						    "MPI reset failed.\n");
					}
				}

				rval = -EIO;
				goto done_free_dma_rsp;
			}

			if (new_config[0]) {
				int ret;

				/* Revert back to original port config
				 * Also clear internal loopback
				 */
				ret = qla81xx_reset_loopback_mode(vha,
				    new_config, 0, 1);
				if (ret) {
					/*
					 * If the reset of the loopback mode
					 * doesn't work take FCoE dump and then
					 * reset the chip.
					 */
					ha->isp_ops->fw_dump(vha, 0);
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}

			}

		} else {
			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x702b,
			    "BSG request type: %s.\n", type);
			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x702c,
		    "Vendor request %s failed.\n", type);

		rval = 0;
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
	} else {
		ql_dbg(ql_dbg_user, vha, 0x702d,
		    "Vendor request %s completed.\n", type);
		bsg_reply->result = (DID_OK << 16);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, rsp_data,
			rsp_data_len);
	}

	bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
	    sizeof(response) + sizeof(uint8_t);
	fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
	    sizeof(struct fc_bsg_reply);
	memcpy(fw_sts_ptr, response, sizeof(response));
	fw_sts_ptr += sizeof(response);
	*fw_sts_ptr = command_sent;

done_free_dma_rsp:
	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
		rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
		req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return rval;
}

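/*
 * Reset the ISP84xx chip, optionally dropping into diagnostic
 * firmware when A84_ISSUE_RESET_DIAG_FW is requested.
 */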
static int
qla84xx_reset(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint32_t flag;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7030,
		    "Vendor request 84xx reset failed.\n");
		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7031,
		    "Vendor request 84xx reset completed.\n");
		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}

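/*
 * Download a firmware image to the ISP84xx: the request payload is
 * staged in a coherent buffer and handed to the chip via a Verify
 * Chip IOCB, optionally flagged as diagnostic firmware.
 */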
static int
qla84xx_updatefw(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7032,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7033,
		    "dma_map_sg returned %d for request.\n", sg_cnt);
		return -ENOMEM;
	}

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7034,
		    "DMA mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
		&fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		ql_log(ql_log_warn, vha, 0x7035,
		    "DMA alloc failed for fw_buf.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x7036,
		    "DMA alloc failed for fw buffer.\n");
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));

	memset(mn, 0, sizeof(struct access_chip_84xx));
	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver = cpu_to_le32(fw_ver);
	mn->fw_size = cpu_to_le32(data_len);
	mn->fw_seq_size = cpu_to_le32(data_len);
	mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
	mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
	mn->dseg_length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7037,
		    "Vendor request 84xx updatefw failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7038,
		    "Vendor request 84xx updatefw completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;
	}

	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return rval;
}

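/*
 * Issue an Access Chip IOCB for ISP84xx management: read or write
 * chip memory, query information, or change a configuration
 * parameter, mapping the request or reply payload as required.
 */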
static int
qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	uint32_t dma_direction = DMA_NONE;

	if (!IS_QLA84XX(ha)) {
		ql_log(ql_log_warn, vha, 0x703a,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x703c,
		    "DMA alloc failed for fw buffer.\n");
		return -ENOMEM;
	}

	memset(mn, 0, sizeof(struct access_chip_84xx));
	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;
	ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703d,
			    "dma_map_sg returned %d for reply.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703e,
			    "DMA mapping resulted in different sg counts, "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x703f,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
				cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
			bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7040,
			    "dma_map_sg returned %d.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7041,
			    "DMA mapping resulted in different sg counts, "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
			    bsg_job->request_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
			&mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x7042,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
		mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
		mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7043,
		    "Vendor request 84xx mgmt failed.\n");

		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7044,
		    "Vendor request 84xx mgmt completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;

		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
			(ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_reply->reply_payload_rcv_len =
				bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
				bsg_job->reply_payload.sg_cnt, mgmt_b,
				data_len);
		}
	}

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return rval;
}

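/*
 * Get or set the iIDMA speed of a logged-in target port identified by
 * WWPN.  For a get, the updated qla_port_param is returned after the
 * fc_bsg_reply.
 */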
static int
qla24xx_iidma(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_port_param *port_param = NULL;
	fc_port_t *fcport = NULL;
	int found = 0;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint8_t *rsp_ptr = NULL;

	if (!IS_IIDMA_CAPABLE(vha->hw)) {
		ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
		return -EINVAL;
	}

	port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
		ql_log(ql_log_warn, vha, 0x7048,
		    "Invalid destination type.\n");
		return -EINVAL;
	}

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->port_type != FCT_TARGET)
			continue;

		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
			fcport->port_name, sizeof(fcport->port_name)))
			continue;

		found = 1;
		break;
	}

	if (!found) {
		ql_log(ql_log_warn, vha, 0x7049,
		    "Failed to find port.\n");
		return -EINVAL;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE) {
		ql_log(ql_log_warn, vha, 0x704a,
		    "Port is not online.\n");
		return -EINVAL;
	}

	if (fcport->flags & FCF_LOGIN_NEEDED) {
		ql_log(ql_log_warn, vha, 0x704b,
		    "Remote port not logged in flags = 0x%x.\n", fcport->flags);
		return -EINVAL;
	}

	if (port_param->mode)
		rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
			port_param->speed, mb);
	else
		rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
			&port_param->speed, mb);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x704c,
		    "iIDMA cmd failed for %8phN -- "
		    "%04x %x %04x %04x.\n", fcport->port_name,
		    rval, fcport->fp_speed, mb[0], mb[1]);
		rval = (DID_ERROR << 16);
	} else {
		if (!port_param->mode) {
			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
				sizeof(struct qla_port_param);

			rsp_ptr = ((uint8_t *)bsg_reply) +
				sizeof(struct fc_bsg_reply);

			memcpy(rsp_ptr, port_param,
				sizeof(struct qla_port_param));
		}

		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}

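/*
 * Validate an option ROM read/update request, clamp the region to the
 * flash size and allocate the staging buffer.  Callers hold
 * ha->optrom_mutex across setup and the actual flash access.
 */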
static int
qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
	uint8_t is_update)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	uint32_t start = 0;
	int valid = 0;
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EINVAL;

	start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	if (start > ha->optrom_size) {
		ql_log(ql_log_warn, vha, 0x7055,
		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
		return -EINVAL;
	}

	if (ha->optrom_state != QLA_SWAITING) {
		ql_log(ql_log_info, vha, 0x7056,
		    "optrom_state %d.\n", ha->optrom_state);
		return -EBUSY;
	}

	ha->optrom_region_start = start;
	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
	if (is_update) {
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7058,
			    "Invalid start region 0x%x/0x%x.\n", start,
			    bsg_job->request_payload.payload_len);
			return -EINVAL;
		}

		ha->optrom_region_size = start +
		    bsg_job->request_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->request_payload.payload_len;
		ha->optrom_state = QLA_SWRITING;
	} else {
		ha->optrom_region_size = start +
		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->reply_payload.payload_len;
		ha->optrom_state = QLA_SREADING;
	}

	ha->optrom_buffer = vmalloc(ha->optrom_region_size);
	if (!ha->optrom_buffer) {
		ql_log(ql_log_warn, vha, 0x7059,
		    "Read: Unable to allocate memory for optrom retrieval "
		    "(%x)\n", ha->optrom_region_size);

		ha->optrom_state = QLA_SWAITING;
		return -ENOMEM;
	}

	memset(ha->optrom_buffer, 0, ha->optrom_region_size);
	return 0;
}

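/* Read a region of the option ROM into the reply payload. */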
static int
qla2x00_read_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	if (ha->flags.nic_core_reset_hdlr_active)
		return -EBUSY;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
	bsg_reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	return rval;
}

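/* Write the request payload to a region of the option ROM. */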
static int
qla2x00_update_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	/* Set the isp82xx_no_md_cap not to capture minidump */
	ha->flags.isp82xx_no_md_cap = 1;

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	bsg_reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	return rval;
}

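/*
 * Write a list of FRU image version fields out through the SFP
 * mailbox interface; vendor_rsp[0] carries the extended status.
 */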
static int
qla2x00_update_fru_versions(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_image_version_list *list = (void *)bsg;
	struct qla_image_version *image;
	uint32_t count;
	dma_addr_t sfp_dma;
	void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, list, sizeof(bsg));

	image = list->version;
	count = list->count;
	while (count--) {
		memcpy(sfp, &image->field_info, sizeof(image->field_info));
		rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
		    image->field_address.device, image->field_address.offset,
		    sizeof(image->field_info), image->field_address.option);
		if (rval) {
			bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
			    EXT_STATUS_MAILBOX;
			goto dealloc;
		}
		image++;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);

	return 0;
}

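/* Read a FRU status register and return it in the reply payload. */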
static int
qla2x00_read_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);
	sr->status_reg = *sfp;

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*sr);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);

	return 0;
}

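/* Write a FRU status register taken from the request payload. */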
static int
qla2x00_write_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	*sfp = sr->status_reg;
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);

	return 0;
}

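/* Write a buffer to a device on the I2C bus. */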
static int
qla2x00_write_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	memcpy(sfp, i2c->buffer, i2c->length);
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);

	return 0;
}

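/* Read from a device on the I2C bus and return the data in the reply. */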
static int
qla2x00_read_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
		i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	memcpy(i2c->buffer, sfp, i2c->length);
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);

	return 0;
}

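/*
 * QL_VND_DIAG_IO_CMD: bidirectional diagnostic pass-through. Requires a
 * P2P fabric topology; the port logs in to itself through the switch,
 * both payloads are DMA-mapped, and a bidirectional IOCB is issued.
 * The job is completed from the interrupt handler.
 */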
static int
qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint32_t rval = EXT_STATUS_OK;
	uint16_t req_sg_cnt = 0;
	uint16_t rsp_sg_cnt = 0;
	uint16_t nextlid = 0;
	uint32_t tot_dsds;
	srb_t *sp = NULL;
	uint32_t req_data_len = 0;
	uint32_t rsp_data_len = 0;

	/* Check the type of the adapter */
	if (!IS_BIDI_CAPABLE(ha)) {
		ql_log(ql_log_warn, vha, 0x70a0,
			"This adapter is not supported\n");
		rval = EXT_STATUS_NOT_SUPPORTED;
		goto done;
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
		test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
		test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		rval =  EXT_STATUS_BUSY;
		goto done;
	}

	/* Check if host is online */
	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70a1,
			"Host is not online\n");
		rval = EXT_STATUS_DEVICE_OFFLINE;
		goto done;
	}

	/* Check if cable is plugged in or not */
	if (vha->device_flags & DFLG_NO_CABLE) {
		ql_log(ql_log_warn, vha, 0x70a2,
			"Cable is unplugged...\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if the switch is connected or not */
	if (ha->current_topology != ISP_CFG_F) {
		ql_log(ql_log_warn, vha, 0x70a3,
			"Host is not connected to the switch\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if operating mode is P2P */
	if (ha->operating_mode != P2P) {
		ql_log(ql_log_warn, vha, 0x70a4,
		    "Host is operating mode is not P2p\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	mutex_lock(&ha->selflogin_lock);
	if (vha->self_login_loop_id == 0) {
		/* Initialize all required  fields of fcport */
		vha->bidir_fcport.vha = vha;
		vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
		vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
		vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
		vha->bidir_fcport.loop_id = vha->loop_id;

		if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
			ql_log(ql_log_warn, vha, 0x70a7,
			    "Failed to login port %06X for bidirectional IOCB\n",
			    vha->bidir_fcport.d_id.b24);
			mutex_unlock(&ha->selflogin_lock);
			rval = EXT_STATUS_MAILBOX;
			goto done;
		}
		vha->self_login_loop_id = nextlid - 1;

	}
	/* Assign the self login loop id to fcport */
	mutex_unlock(&ha->selflogin_lock);

	vha->bidir_fcport.loop_id = vha->self_login_loop_id;

	req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt,
		DMA_TO_DEVICE);

	if (!req_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
		DMA_FROM_DEVICE);

	if (!rsp_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_req_sg;
	}

	if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
		(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_dbg(ql_dbg_user, vha, 0x70a9,
		    "Dma mapping resulted in different sg counts "
		    "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
		    "%x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	req_data_len = bsg_job->request_payload.payload_len;
	rsp_data_len = bsg_job->reply_payload.payload_len;

	if (req_data_len != rsp_data_len) {
		rval = EXT_STATUS_BUSY;
		ql_log(ql_log_warn, vha, 0x70aa,
		    "req_data_len != rsp_data_len\n");
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
	if (!sp) {
		ql_dbg(ql_dbg_user, vha, 0x70ac,
		    "Alloc SRB structure failed\n");
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	/* Populate srb->ctx with bidir ctx */
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->type = SRB_BIDI_CMD;
	sp->done = qla2x00_bsg_job_done;

	/* Add the read and write sg count */
	tot_dsds = rsp_sg_cnt + req_sg_cnt;

	rval = qla2x00_start_bidir(sp, vha, tot_dsds);
	if (rval != EXT_STATUS_OK)
		goto done_free_srb;
	/* the bsg request will be completed in the interrupt handler */
	return rval;

done_free_srb:
	mempool_free(sp, ha->srb_mempool);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
done:

	/* Return an error vendor specific response
	 * and complete the bsg request
	 */
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = 0;
	bsg_reply->result = (DID_OK) << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	/* Always return success, vendor rsp carries correct status */
	return 0;
}

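/*
 * QL_VND_FX00_MGMT_CMD: pass an ISPFX00 management IOCB to the
 * firmware. Payloads are DMA-mapped only when the request/response
 * DMA-valid flags are set; a dummy fcport supplies the port fields
 * the IOCB helpers expect.
 */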
static int
qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DRIVER_ERROR << 16);
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
	srb_t *sp;
	int req_sg_cnt = 0, rsp_sg_cnt = 0;
	struct fc_port *fcport;
	char  *type = "FC_BSG_HST_FX_MGMT";

	/* Copy the IOCB specific information */
	piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
	    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Dump the vendor information */
	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
	    (uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00));

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70d0,
		    "Host is not online.\n");
		rval = -EIO;
		goto done;
	}

	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
		req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		if (!req_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c7,
			    "dma_map_sg return %d for request\n", req_sg_cnt);
			rval = -ENOMEM;
			goto done;
		}
	}

	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!rsp_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c8,
			    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
			rval = -ENOMEM;
			goto done_unmap_req_sg;
		}
	}

	ql_dbg(ql_dbg_user, vha, 0x70c9,
	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
	    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
	    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieve port specific information
	 * from the fcport structure. For host based ELS commands there is
	 * no fcport structure allocated.
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x70ca,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_rsp_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x70cb,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->loop_id = piocb_rqst->dataword;

	sp->type = SRB_FXIOCB_BCMD;
	sp->name = "bsg_fx_mgmt";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x70cc,
	    "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
	    type, piocb_rqst->func_type, fcport->loop_id);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x70cd,
		    "qla2x00_start_sp failed=%d.\n", rval);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	kfree(fcport);

done_unmap_rsp_sg:
	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

done:
	return rval;
}

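/*
 * QL_VND_SERDES_OP: read or write one SerDes register word through the
 * mailbox interface. Reads return the value in the reply payload; any
 * mailbox failure is reported as EXT_STATUS_MAILBOX.
 */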
static int
qla26xx_serdes_op(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_serdes_reg sr;

	memset(&sr, 0, sizeof(sr));

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

	switch (sr.cmd) {
	case INT_SC_SERDES_WRITE_REG:
		rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
		bsg_reply->reply_payload_rcv_len = 0;
		break;
	case INT_SC_SERDES_READ_REG:
		rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
		bsg_reply->reply_payload_rcv_len = sizeof(sr);
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x708c,
		    "Unknown serdes cmd %x.\n", sr.cmd);
		rval = -EINVAL;
		break;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : 0;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	return 0;
}

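/*
 * QL_VND_SERDES_OP_EX: ISP8044 flavor of the SerDes register accessor,
 * using the extended register descriptor (struct qla_serdes_reg_ex).
 */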
static int
qla8044_serdes_op(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_serdes_reg_ex sr;

	memset(&sr, 0, sizeof(sr));

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

	switch (sr.cmd) {
	case INT_SC_SERDES_WRITE_REG:
		rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
		bsg_reply->reply_payload_rcv_len = 0;
		break;
	case INT_SC_SERDES_READ_REG:
		rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
		bsg_reply->reply_payload_rcv_len = sizeof(sr);
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x70cf,
		    "Unknown serdes cmd %x.\n", sr.cmd);
		rval = -EINVAL;
		break;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : 0;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	return 0;
}

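/*
 * QL_VND_GET_FLASH_UPDATE_CAPS (ISP27xx only): return the running
 * firmware attribute words packed into one 64-bit capabilities value.
 */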
static int
qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct qla_flash_update_caps cap;

	if (!(IS_QLA27XX(ha)))
		return -EPERM;

	memset(&cap, 0, sizeof(cap));
	cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
			   (uint64_t)ha->fw_attributes_ext[0] << 32 |
			   (uint64_t)ha->fw_attributes_h << 16 |
			   (uint64_t)ha->fw_attributes;

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
	bsg_reply->reply_payload_rcv_len = sizeof(cap);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	return 0;
}

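/*
 * QL_VND_SET_FLASH_UPDATE_CAPS (ISP27xx only): validate the requested
 * capabilities against the online firmware attributes and the allowed
 * outage duration; anything else is rejected as an invalid parameter.
 */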
static int
qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint64_t online_fw_attr = 0;
	struct qla_flash_update_caps cap;

	if (!(IS_QLA27XX(ha)))
		return -EPERM;

	memset(&cap, 0, sizeof(cap));
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));

	online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
			 (uint64_t)ha->fw_attributes_ext[0] << 32 |
			 (uint64_t)ha->fw_attributes_h << 16 |
			 (uint64_t)ha->fw_attributes;

	if (online_fw_attr != cap.capabilities) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_INVALID_PARAM;
		return -EINVAL;
	}

	if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_INVALID_PARAM;
		return -EINVAL;
	}

	bsg_reply->reply_payload_rcv_len = 0;

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	return 0;
}

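/*
 * QL_VND_GET_BBCR_DATA (ISP27xx only): report buffer-to-buffer credit
 * recovery status and state, plus the configured and negotiated BB_SC_N
 * values decoded from vha->bbcr.
 */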
static int
qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct qla_bbcr_data bbcr;
	uint16_t loop_id, topo, sw_cap;
	uint8_t domain, area, al_pa, state;
	int rval;

	if (!(IS_QLA27XX(ha)))
		return -EPERM;

	memset(&bbcr, 0, sizeof(bbcr));

	if (vha->flags.bbcr_enable)
		bbcr.status = QLA_BBCR_STATUS_ENABLED;
	else
		bbcr.status = QLA_BBCR_STATUS_DISABLED;

	if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
		rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
			&area, &domain, &topo, &sw_cap);
		if (rval != QLA_SUCCESS) {
			bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			bbcr.mbx1 = loop_id;
			goto done;
		}

		state = (vha->bbcr >> 12) & 0x1;

		if (state) {
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
		} else {
			bbcr.state = QLA_BBCR_STATE_ONLINE;
			bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
		}

		bbcr.configured_bbscn = vha->bbcr & 0xf;
	}

done:
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
	bsg_reply->reply_payload_rcv_len = sizeof(bbcr);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	return 0;
}

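/*
 * QL_VND_GET_PRIV_STATS[_EX]: fetch ISP link statistics into a
 * DMA-coherent buffer and return them in the reply payload; the _EX
 * variant forwards an extra options word to the stats mailbox command.
 */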
static int
qla2x00_get_priv_stats(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct link_statistics *stats = NULL;
	dma_addr_t stats_dma;
	int rval;
	uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
	uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;

	if (test_bit(UNLOADING, &vha->dpc_flags))
		return -ENODEV;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -ENODEV;

	if (qla2x00_reset_active(vha))
		return -EBUSY;

	if (!IS_FWI2_CAPABLE(ha))
		return -EPERM;

	stats = dma_alloc_coherent(&ha->pdev->dev,
		sizeof(*stats), &stats_dma, GFP_KERNEL);
	if (!stats) {
		ql_log(ql_log_warn, vha, 0x70e2,
		    "Failed to allocate memory for stats.\n");
		return -ENOMEM;
	}

	memset(stats, 0, sizeof(*stats));

	rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);

	if (rval == QLA_SUCCESS) {
		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e3,
		    (uint8_t *)stats, sizeof(*stats));
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
	}

	bsg_reply->reply_payload_rcv_len = sizeof(*stats);
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(*bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);

	dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
		stats, stats_dma);

	return 0;
}

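/*
 * QL_VND_DPORT_DIAGNOSTICS (ISP83xx/ISP27xx): run D-Port diagnostics
 * with the caller-supplied options and return the result buffer in the
 * reply payload.
 */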
static int
qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval;
	struct qla_dport_diag *dd;

	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
		return -EPERM;

	dd = kmalloc(sizeof(*dd), GFP_KERNEL);
	if (!dd) {
		ql_log(ql_log_warn, vha, 0x70db,
		    "Failed to allocate memory for dport.\n");
		return -ENOMEM;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));

	rval = qla26xx_dport_diagnostics(
	    vha, dd->buf, sizeof(dd->buf), dd->options);
	if (rval == QLA_SUCCESS) {
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
	}

	bsg_reply->reply_payload_rcv_len = sizeof(*dd);
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(*bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);

	kfree(dd);

	return 0;
}

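/* Dispatch an FC_BSG_HST_VENDOR request to the matching QL_VND_* handler. */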
static int
qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;

	switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
	case QL_VND_LOOPBACK:
		return qla2x00_process_loopback(bsg_job);

	case QL_VND_A84_RESET:
		return qla84xx_reset(bsg_job);

	case QL_VND_A84_UPDATE_FW:
		return qla84xx_updatefw(bsg_job);

	case QL_VND_A84_MGMT_CMD:
		return qla84xx_mgmt_cmd(bsg_job);

	case QL_VND_IIDMA:
		return qla24xx_iidma(bsg_job);

	case QL_VND_FCP_PRIO_CFG_CMD:
		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);

	case QL_VND_READ_FLASH:
		return qla2x00_read_optrom(bsg_job);

	case QL_VND_UPDATE_FLASH:
		return qla2x00_update_optrom(bsg_job);

	case QL_VND_SET_FRU_VERSION:
		return qla2x00_update_fru_versions(bsg_job);

	case QL_VND_READ_FRU_STATUS:
		return qla2x00_read_fru_status(bsg_job);

	case QL_VND_WRITE_FRU_STATUS:
		return qla2x00_write_fru_status(bsg_job);

	case QL_VND_WRITE_I2C:
		return qla2x00_write_i2c(bsg_job);

	case QL_VND_READ_I2C:
		return qla2x00_read_i2c(bsg_job);

	case QL_VND_DIAG_IO_CMD:
		return qla24xx_process_bidir_cmd(bsg_job);

	case QL_VND_FX00_MGMT_CMD:
		return qlafx00_mgmt_cmd(bsg_job);

	case QL_VND_SERDES_OP:
		return qla26xx_serdes_op(bsg_job);

	case QL_VND_SERDES_OP_EX:
		return qla8044_serdes_op(bsg_job);

	case QL_VND_GET_FLASH_UPDATE_CAPS:
		return qla27xx_get_flash_upd_cap(bsg_job);

	case QL_VND_SET_FLASH_UPDATE_CAPS:
		return qla27xx_set_flash_upd_cap(bsg_job);

	case QL_VND_GET_BBCR_DATA:
		return qla27xx_get_bbcr_data(bsg_job);

	case QL_VND_GET_PRIV_STATS:
	case QL_VND_GET_PRIV_STATS_EX:
		return qla2x00_get_priv_stats(bsg_job);

	case QL_VND_DPORT_DIAGNOSTICS:
		return qla2x00_do_dport_diagnostics(bsg_job);

	default:
		return -ENOSYS;
	}
}

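/*
 * Main BSG entry point. Resolve the owning host (via the rport for
 * port-directed ELS requests), reject new work while an ISP abort is
 * active, then dispatch on the BSG message code.
 */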
int
qla24xx_bsg_request(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	int ret = -EINVAL;
	struct fc_rport *rport;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;

	/* In case no data transferred. */
	bsg_reply->reply_payload_rcv_len = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		host = rport_to_shost(rport);
		vha = shost_priv(host);
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
	}

	if (qla2x00_reset_active(vha)) {
		ql_dbg(ql_dbg_user, vha, 0x709f,
		    "BSG: ISP abort active/needed -- cmd=%d.\n",
		    bsg_request->msgcode);
		return -EBUSY;
	}

	ql_dbg(ql_dbg_user, vha, 0x7000,
	    "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);

	switch (bsg_request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_ELS_NOLOGIN:
		ret = qla2x00_process_els(bsg_job);
		break;
	case FC_BSG_HST_CT:
		ret = qla2x00_process_ct(bsg_job);
		break;
	case FC_BSG_HST_VENDOR:
		ret = qla2x00_process_vendor_specific(bsg_job);
		break;
	case FC_BSG_HST_ADD_RPORT:
	case FC_BSG_HST_DEL_RPORT:
	case FC_BSG_RPT_CT:
	default:
		ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
		break;
	}
	return ret;
}

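/*
 * BSG timeout handler: locate the SRB owning this job in the
 * outstanding-command arrays of all request queues and abort it via the
 * ISP. The hardware lock is dropped around the mailbox-based abort and
 * reacquired before the SRB is freed.
 */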
int
qla24xx_bsg_timeout(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;

	/* find the bsg job from the active list of commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (sp) {
				if (((sp->type == SRB_CT_CMD) ||
					(sp->type == SRB_ELS_CMD_HST) ||
					(sp->type == SRB_FXIOCB_BCMD))
					&& (sp->u.bsg_job == bsg_job)) {
					req->outstanding_cmds[cnt] = NULL;
					spin_unlock_irqrestore(&ha->hardware_lock, flags);
					if (ha->isp_ops->abort_command(sp)) {
						ql_log(ql_log_warn, vha, 0x7089,
						    "mbx abort_command failed.\n");
						bsg_job->req->errors =
						bsg_reply->result = -EIO;
					} else {
						ql_dbg(ql_dbg_user, vha, 0x708a,
						    "mbx abort_command success.\n");
						bsg_job->req->errors =
						bsg_reply->result = 0;
					}
					spin_lock_irqsave(&ha->hardware_lock, flags);
					goto done;
				}
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
	bsg_job->req->errors = bsg_reply->result = -ENXIO;
	return 0;

done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	sp->free(vha, sp);
	return 0;
}