/*
 * Copyright (C) 2005 - 2015 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"

static char *be_port_misconfig_evt_desc[] = {
	"A valid SFP module detected",
	"Optics faulted/ incorrectly installed/ not installed.",
	"Optics of two types installed.",
	"Incompatible optics.",
	"Unknown port SFP status"
};

static char *be_port_misconfig_remedy_desc[] = {
	"",
	"Reseat optics. If issue not resolved, replace",
	"Remove one optic or install matching pair of optics",
	"Replace with compatible optics for card to function",
	""
};

static struct be_cmd_priv_map cmd_priv_map[] = {
	{
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_SET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_ETH_GET_PPORT_STATS,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_PHY_DETAILS,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	}
};

static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
{
{
	int i;
	int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
	u32 cmd_privileges = adapter->cmd_privileges;

	for (i = 0; i < num_entries; i++)
		if (opcode == cmd_priv_map[i].opcode &&
		    subsystem == cmd_priv_map[i].subsystem)
			if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
				return false;

	return true;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

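/* Posts newly-queued MCC WRBs to the device: the MCC queue id and the
 * count of just-posted entries are packed into a single doorbell write.
 * The wmb() makes sure the WRB contents are visible to the device before
 * the doorbell rings.
 */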
static int be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_ANY))
		return -EIO;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);

	return 0;
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	u32 flags;

	if (compl->flags != 0) {
		flags = le32_to_cpu(compl->flags);
		if (flags & CQE_FLAGS_VALID_MASK) {
			compl->flags = flags;
			return true;
		}
	}
	return false;
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

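/* tag0/tag1 of a WRB round-trip the 64-bit virtual address of the
 * originating request header (stored by fill_wrb_tags()); reassembling
 * them lets the completion path locate the request that produced a
 * given completion.
 */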
static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
{
	unsigned long addr;

	addr = tag1;
	addr = ((addr << 16) << 16) | tag0;
	return (void *)addr;
}

static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
{
	if (base_status == MCC_STATUS_NOT_SUPPORTED ||
	    base_status == MCC_STATUS_ILLEGAL_REQUEST ||
	    addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
	    addl_status == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
	    (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
	    (base_status == MCC_STATUS_ILLEGAL_FIELD ||
	     addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
		return true;
	else
		return false;
}

/* Handles all the async MCC cmds for which the caller is not in a busy
 * loop (i.e. has not issued be_mcc_notify_wait())
 */
static void be_async_cmd_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl,
				 struct be_cmd_resp_hdr *resp_hdr)
{
	enum mcc_base_status base_status = base_status(compl->status);
	u8 opcode = 0, subsystem = 0;

	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
		complete(&adapter->et_cmd_compl);
		return;
	}

	if (opcode == OPCODE_LOWLEVEL_SET_LOOPBACK_MODE &&
	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
		complete(&adapter->et_cmd_compl);
		return;
	}

	if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
	     opcode == OPCODE_COMMON_WRITE_OBJECT) &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		adapter->flash_status = compl->status;
		complete(&adapter->et_cmd_compl);
		return;
	}

	if ((opcode == OPCODE_ETH_GET_STATISTICS ||
	     opcode == OPCODE_ETH_GET_PPORT_STATS) &&
	    subsystem == CMD_SUBSYSTEM_ETH &&
	    base_status == MCC_STATUS_SUCCESS) {
		be_parse_stats(adapter);
		adapter->stats_cmd_sent = false;
		return;
	}

	if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		if (base_status == MCC_STATUS_SUCCESS) {
			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
							(void *)resp_hdr;

			adapter->hwmon_info.be_on_die_temp =
						resp->on_die_temperature;
		} else {
			adapter->be_get_temp_freq = 0;
			adapter->hwmon_info.be_on_die_temp =
						BE_INVALID_DIE_TEMP;
		}
		return;
	}
}

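/* A note on the completion status word (a sketch inferred from the
 * CQE_ADDL_STATUS_* usage in be_mcc_notify_wait()): the base status sits
 * in the low 16 bits of compl->status and the additional status in the
 * bits above it; base_status()/addl_status() split the two fields.
 */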
static int be_mcc_compl_process(struct be_adapter *adapter,
				struct be_mcc_compl *compl)
{
	enum mcc_base_status base_status;
	enum mcc_addl_status addl_status;
	struct be_cmd_resp_hdr *resp_hdr;
	u8 opcode = 0, subsystem = 0;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	base_status = base_status(compl->status);
	addl_status = addl_status(compl->status);

	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	be_async_cmd_process(adapter, compl, resp_hdr);

	if (base_status != MCC_STATUS_SUCCESS &&
	    !be_skip_err_log(opcode, base_status, addl_status)) {
		if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
			dev_warn(&adapter->pdev->dev,
				 "VF is not privileged to issue opcode %d-%d\n",
				 opcode, subsystem);
		} else {
			dev_err(&adapter->pdev->dev,
				"opcode %d-%d failed:status %d-%d\n",
				opcode, subsystem, base_status, addl_status);
		}
	}
	return compl->status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
					struct be_mcc_compl *compl)
{
	struct be_async_event_link_state *evt =
			(struct be_async_event_link_state *)compl;

	/* When link status changes, link speed must be re-queried from FW */
	adapter->phy.link_speed = -1;

	/* On BEx the FW does not send a separate link status
	 * notification for physical and logical link.
	 * On other chips just process the logical link
	 * status notification
	 */
	if (!BEx_chip(adapter) &&
	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
		return;

	/* For the initial link status do not rely on the ASYNC event as
	 * it may not be received in some cases.
	 */
	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
		be_link_status_update(adapter,
				      evt->port_link_status & LINK_STATUS_MASK);
}

static void be_async_port_misconfig_event_process(struct be_adapter *adapter,
						  struct be_mcc_compl *compl)
{
	struct be_async_event_misconfig_port *evt =
			(struct be_async_event_misconfig_port *)compl;
	u32 sfp_mismatch_evt = le32_to_cpu(evt->event_data_word1);
	struct device *dev = &adapter->pdev->dev;
	u8 port_misconfig_evt;

	port_misconfig_evt =
		((sfp_mismatch_evt >> (adapter->hba_port_num * 8)) & 0xff);

	/* Log an error message that would allow a user to determine
	 * whether the SFPs have an issue
	 */
	dev_info(dev, "Port %c: %s %s", adapter->port_name,
		 be_port_misconfig_evt_desc[port_misconfig_evt],
		 be_port_misconfig_remedy_desc[port_misconfig_evt]);

	if (port_misconfig_evt == INCOMPATIBLE_SFP)
		adapter->flags |= BE_FLAGS_EVT_INCOMPATIBLE_SFP;
}

/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
					       struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_cos_priority *evt =
			(struct be_async_event_grp5_cos_priority *)compl;

	if (evt->valid) {
		adapter->vlan_prio_bmap = evt->available_priority_bmap;
		adapter->recommended_prio &= ~VLAN_PRIO_MASK;
		adapter->recommended_prio =
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
	}
}

/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
					    struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_qos_link_speed *evt =
			(struct be_async_event_grp5_qos_link_speed *)compl;

	if (adapter->phy.link_speed >= 0 &&
	    evt->physical_port == adapter->port_num)
		adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
}

/* Grp5 PVID evt */
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
					     struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_pvid_state *evt =
			(struct be_async_event_grp5_pvid_state *)compl;

	if (evt->enabled) {
		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
		dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
	} else {
		adapter->pvid = 0;
	}
}

#define MGMT_ENABLE_MASK	0x4
static void be_async_grp5_fw_control_process(struct be_adapter *adapter,
					     struct be_mcc_compl *compl)
{
	struct be_async_fw_control *evt = (struct be_async_fw_control *)compl;
	u32 evt_dw1 = le32_to_cpu(evt->event_data_word1);

	if (evt_dw1 & MGMT_ENABLE_MASK) {
		adapter->flags |= BE_FLAGS_OS2BMC;
		adapter->bmc_filt_mask = le32_to_cpu(evt->event_data_word2);
	} else {
		adapter->flags &= ~BE_FLAGS_OS2BMC;
	}
}

static void be_async_grp5_evt_process(struct be_adapter *adapter,
				      struct be_mcc_compl *compl)
{
	u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
				ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter, compl);
		break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter, compl);
		break;
	case ASYNC_EVENT_PVID_STATE:
		be_async_grp5_pvid_state_process(adapter, compl);
		break;
	/* Async event to disable/enable os2bmc and/or mac-learning */
	case ASYNC_EVENT_FW_CONTROL:
		be_async_grp5_fw_control_process(adapter, compl);
		break;
	default:
		break;
	}
}

static void be_async_dbg_evt_process(struct be_adapter *adapter,
				     struct be_mcc_compl *cmp)
{
	u8 event_type = 0;
	struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp;

	event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
			ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_DEBUG_EVENT_TYPE_QNQ:
		if (evt->valid)
			adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
		adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
			 event_type);
		break;
	}
}

static void be_async_sliport_evt_process(struct be_adapter *adapter,
					 struct be_mcc_compl *cmp)
{
	u8 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
			ASYNC_EVENT_TYPE_MASK;

	if (event_type == ASYNC_EVENT_PORT_MISCONFIG)
		be_async_port_misconfig_event_process(adapter, cmp);
}

static inline bool is_link_state_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_GRP_5;
}

static inline bool is_dbg_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_QNQ;
}

static inline bool is_sliport_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_SLIPORT;
}

static void be_mcc_event_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl)
{
	if (is_link_state_evt(compl->flags))
		be_async_link_state_process(adapter, compl);
	else if (is_grp5_evt(compl->flags))
		be_async_grp5_evt_process(adapter, compl);
	else if (is_dbg_evt(compl->flags))
		be_async_dbg_evt_process(adapter, compl);
	else if (is_sliport_evt(compl->flags))
		be_async_sliport_evt_process(adapter, compl);
}

static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	adapter->mcc_obj.rearm_cq = false;
	be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock(&adapter->mcc_cq_lock);

	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			be_mcc_event_process(adapter, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

	spin_unlock(&adapter->mcc_cq_lock);
	return status;
}

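/* be_process_mcc() takes mcc_cq_lock with a plain spin_lock() and so
 * expects bottom-half context; the polling helper below therefore
 * brackets each call with local_bh_disable()/local_bh_enable().
 */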
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout		120000 /* 12s timeout */
	int i, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		if (be_check_error(adapter, BE_ERROR_ANY))
			return -EIO;

		local_bh_disable();
		status = be_process_mcc(adapter);
		local_bh_enable();

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "FW not responding\n");
		be_set_error(adapter, BE_ERROR_FW);
		return -EIO;
	}
	return status;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	int status;
	struct be_mcc_wrb *wrb;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	u16 index = mcc_obj->q.head;
	struct be_cmd_resp_hdr *resp;

	index_dec(&index, mcc_obj->q.len);
	wrb = queue_index_node(&mcc_obj->q, index);

	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);

	status = be_mcc_notify(adapter);
	if (status)
		goto out;

	status = be_mcc_wait_compl(adapter);
	if (status == -EIO)
		goto out;

	status = (resp->base_status |
		  ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
		   CQE_ADDL_STATUS_SHIFT));
out:
	return status;
}

static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		if (be_check_error(adapter, BE_ERROR_ANY))
			return -EIO;

		ready = ioread32(db);
		if (ready == 0xffffffff)
			return -1;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "FW not responding\n");
			be_set_error(adapter, BE_ERROR_FW);
			be_detect_error(adapter);
			return -1;
		}

		msleep(1);
		msecs++;
	} while (true);

	return 0;
}

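/* The mailbox doorbell cannot carry a full 64-bit DMA address in one
 * write, hence the two-step protocol below: the high dword is posted
 * first (flagged via MPU_MAILBOX_DB_HI_MASK), then the low dword; the
 * device acks each step through the ready bit polled in
 * be_mbox_db_ready_wait().
 */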
/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

static u16 be_POST_stage_get(struct be_adapter *adapter)
{
	u32 sem;

	if (BEx_chip(adapter))
		sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
	else
		pci_read_config_dword(adapter->pdev,
				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);

	return sem & POST_STAGE_MASK;
}

static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			return 0;

		if (sliport_status & SLIPORT_STATUS_ERR_MASK &&
		    !(sliport_status & SLIPORT_STATUS_RN_MASK))
			return -EIO;

		msleep(1000);
	}

	return sliport_status ? : -1;
}

int be_fw_wait_ready(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (status) {
			stage = status;
			goto err;
		}
		return 0;
	}

	do {
		/* There's no means to poll POST state on BE2/3 VFs */
		if (BEx_chip(adapter) && be_virtfn(adapter))
			return 0;

		stage = be_POST_stage_get(adapter);
		if (stage == POST_STAGE_ARMFW_RDY)
			return 0;

		dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
		if (msleep_interruptible(2000)) {
			dev_err(dev, "Waiting for POST aborted\n");
			return -EINTR;
		}
		timeout += 2;
	} while (timeout < 60);

err:
	dev_err(dev, "POST timeout; stage=%#x\n", stage);
	return -ETIMEDOUT;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
{
	wrb->tag0 = addr & 0xFFFFFFFF;
	wrb->tag1 = upper_32_bits(addr);
}

/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				   u8 subsystem, u8 opcode, int cmd_len,
				   struct be_mcc_wrb *wrb,
				   struct be_dma_mem *mem)
{
	struct be_sge *sge;

	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
	fill_wrb_tags(wrb, (ulong) req_hdr);
	wrb->payload_length = cmd_len;
	if (mem) {
		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
			MCC_WRB_SGE_CNT_SHIFT;
		sge = nonembedded_sgl(wrb);
		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
		sge->len = cpu_to_le32(mem->size);
	} else
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	be_dws_cpu_to_le(wrb, 8);
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
				      struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (!mccq->created)
		return NULL;

	if (atomic_read(&mccq->used) >= mccq->len)
		return NULL;

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

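/* The helpers below transparently pick the command transport: the MCC
 * queue once it has been created (guarded by a BH-safe spinlock), or the
 * bootstrap mailbox before that (guarded by an interruptible mutex).
 * be_cmd_notify_wait() copies a caller's stack-built WRB into whichever
 * ring is chosen and copies the response back on success.
 */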
static bool use_mcc(struct be_adapter *adapter)
{
	return adapter->mcc_obj.q.created;
}

/* Must be used only in process context */
static int be_cmd_lock(struct be_adapter *adapter)
{
	if (use_mcc(adapter)) {
		spin_lock_bh(&adapter->mcc_lock);
		return 0;
	} else {
		return mutex_lock_interruptible(&adapter->mbox_lock);
	}
}

/* Must be used only in process context */
static void be_cmd_unlock(struct be_adapter *adapter)
{
	if (use_mcc(adapter))
		spin_unlock_bh(&adapter->mcc_lock);
	else
		return mutex_unlock(&adapter->mbox_lock);
}

static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
				      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;

	if (use_mcc(adapter)) {
		dest_wrb = wrb_from_mccq(adapter);
		if (!dest_wrb)
			return NULL;
	} else {
		dest_wrb = wrb_from_mbox(adapter);
	}

	memcpy(dest_wrb, wrb, sizeof(*wrb));
	if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
		fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));

	return dest_wrb;
}

/* Must be used only in process context */
static int be_cmd_notify_wait(struct be_adapter *adapter,
			      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;
	int status;

	status = be_cmd_lock(adapter);
	if (status)
		return status;

	dest_wrb = be_cmd_copy(adapter, wrb);
	if (!dest_wrb)
		return -EBUSY;

	if (use_mcc(adapter))
		status = be_mcc_notify_wait(adapter);
	else
		status = be_mbox_notify_wait(adapter);

	if (!status)
		memcpy(wrb, dest_wrb, sizeof(*wrb));

	be_cmd_unlock(adapter);
	return status;
}

/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eqo->q.dma_mem;
	int status, ver = 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
			       NULL);

	/* Support for EQ_CREATEv2 available only SH-R onwards */
	if (!(BEx_chip(adapter) || lancer_chip(adapter)))
		ver = 2;

	req->hdr.version = ver;
	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe */
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eqo->q.len / 256));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);

		eqo->q.id = le16_to_cpu(resp->eq_id);
		eqo->msix_idx =
			(ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
		eqo->q.created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

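/* Most commands below follow one synchronous-MCC pattern: take mcc_lock,
 * grab a free WRB from the MCC queue (-EBUSY if the ring is full), build
 * the request in the WRB's embedded payload, and let be_mcc_notify_wait()
 * ring the doorbell and poll for the completion.
 */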
/* Use MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			  bool permanent, u32 if_handle, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
			       NULL);
	req->type = MAC_ADDRESS_TYPE_NETWORK;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16)if_handle);
		req->pmac_id = cpu_to_le32(pmac_id);
		req->permanent = 0;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);

		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		    u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
			       NULL);

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);

		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
		status = -EPERM;

	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	if (pmac_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = dom;
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
		     struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
			      coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
			      ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
	} else {
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */

		/* coalesce-wm field in this cmd is not relevant to Lancer.
		 * Lancer uses COMMON_MODIFY_CQ to set this field
		 */
		if (!lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
				      ctxt, coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
			      no_delay);
		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);

		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

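/* Ring sizes are passed to the device as log2(len) + 1, which is what
 * fls() returns for a power-of-2 length: e.g. a 1024-entry ring encodes
 * as 11; an encoding of 16 wraps to 0, as shown below.
 */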
static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}

static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
				  struct be_queue_info *mccq,
				  struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_ext_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	} else {
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
			      ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
			      ctxt, 1);
	}

	/* Subscribe to Link State, Sliport, Group 5 and QnQ (debug) events */
	req->async_event_bitmap[0] =
			cpu_to_le32(BIT(ASYNC_EVENT_CODE_LINK_STATE) |
				    BIT(ASYNC_EVENT_CODE_GRP_5) |
				    BIT(ASYNC_EVENT_CODE_QNQ) |
				    BIT(ASYNC_EVENT_CODE_SLIPORT));

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}

static int be_cmd_mccq_org_create(struct be_adapter *adapter,
				  struct be_queue_info *mccq,
				  struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
		      be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_mccq_create(struct be_adapter *adapter,
		       struct be_queue_info *mccq, struct be_queue_info *cq)
{
	int status;

	status = be_cmd_mccq_ext_create(adapter, mccq, cq);
	if (status && BEx_chip(adapter)) {
		dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
			"or newer to avoid conflicting priorities between NIC "
			"and FCoE traffic");
		status = be_cmd_mccq_org_create(adapter, mccq, cq);
	}
	return status;
}

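/* be_cmd_txq_create() below builds its WRB on the caller's stack and
 * hands it to be_cmd_notify_wait(), which copies it into the bootstrap
 * mailbox or the MCC ring as appropriate; the same path thus works both
 * before and after the MCC queue exists.
 */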
int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_mcc_wrb wrb = {0};
	struct be_cmd_req_eth_tx_create *req;
	struct be_queue_info *txq = &txo->q;
	struct be_queue_info *cq = &txo->cq;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	int status, ver = 0;

	req = embedded_payload(&wrb);
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);

	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
	} else if (BEx_chip(adapter)) {
		if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
			req->hdr.version = 2;
	} else { /* For SH */
		req->hdr.version = 2;
	}

	if (req->hdr.version > 0)
		req->if_id = cpu_to_le16(adapter->if_handle);
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;
	req->cq_id = cpu_to_le16(cq->id);
	req->queue_size = be_encoded_q_len(txq->len);
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	ver = req->hdr.version;

	status = be_cmd_notify_wait(adapter, &wrb);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);

		txq->id = le16_to_cpu(resp->cid);
		if (ver == 2)
			txo->db_offset = le32_to_cpu(resp->db_offset);
		else
			txo->db_offset = DB_TXULP1_OFFSET;
		txq->created = true;
	}

	return status;
}

/* Uses MCC */
int be_cmd_rxq_create(struct be_adapter *adapter,
		      struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		      u32 if_id, u32 rss, u8 *rss_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);

		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
		*rss_id = resp->rss_id;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		     int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		BUG();
	}

	be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
			       NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);
	q->created = false;

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses MCC */
int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mcc_notify_wait(adapter);
	q->created = false;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Create an rx filtering policy configuration on an i/f
 * Will use MBOX only if MCCQ has not been created.
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		     u32 *if_handle, u32 domain)
{
	struct be_mcc_wrb wrb = {0};
	struct be_cmd_req_if_create *req;
	int status;

	req = embedded_payload(&wrb);
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_INTERFACE_CREATE,
			       sizeof(*req), &wrb, NULL);
	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	req->pmac_invalid = true;

	status = be_cmd_notify_wait(adapter, &wrb);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);

		*if_handle = le32_to_cpu(resp->interface_id);

		/* Hack to retrieve VF's pmac-id on BE3 */
		if (BE3_chip(adapter) && be_virtfn(adapter))
			adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
	}
	return status;
}

/* Uses MCCQ */
int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
	int status;

	if (interface_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
			       sizeof(*req), wrb, NULL);
	req->hdr.domain = domain;
	req->interface_id = cpu_to_le32(interface_id);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	hdr = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
			       nonemb_cmd);

	/* BE2 supports only v0 of the cmd; BE3 and Lancer support v1 */
	if (BE2_chip(adapter))
		hdr->version = 0;
	else if (BE3_chip(adapter) || lancer_chip(adapter))
		hdr->version = 1;
	else
		hdr->version = 2;

	status = be_mcc_notify(adapter);
	if (status)
		goto err;

	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Lancer Stats */
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
			       struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_pport_stats *req;
	int status = 0;

	if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
			    CMD_SUBSYSTEM_ETH))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
			       wrb, nonemb_cmd);

	req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
	req->cmd_params.params.reset_stats = 0;

	status = be_mcc_notify(adapter);
	if (status)
		goto err;

	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

static int be_mac_to_link_speed(int mac_speed)
{
	switch (mac_speed) {
	case PHY_LINK_SPEED_ZERO:
		return 0;
	case PHY_LINK_SPEED_10MBPS:
		return 10;
	case PHY_LINK_SPEED_100MBPS:
		return 100;
	case PHY_LINK_SPEED_1GBPS:
		return 1000;
	case PHY_LINK_SPEED_10GBPS:
		return 10000;
	case PHY_LINK_SPEED_20GBPS:
		return 20000;
	case PHY_LINK_SPEED_25GBPS:
		return 25000;
	case PHY_LINK_SPEED_40GBPS:
		return 40000;
	}
	return 0;
}

/* Uses synchronous mcc
 * Returns link_speed in Mbps
 */
int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
			     u8 *link_status, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	if (link_status)
		*link_status = LINK_DOWN;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
			       sizeof(*req), wrb, NULL);

	/* version 1 of the cmd is supported by all chips except BE2 */
	if (!BE2_chip(adapter))
		req->hdr.version = 1;

	req->hdr.domain = dom;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);

		if (link_speed) {
			*link_speed = resp->link_speed ?
				      le16_to_cpu(resp->link_speed) * 10 :
				      be_mac_to_link_speed(resp->mac_speed);

			if (!resp->logical_link_status)
				*link_speed = 0;
		}
		if (link_status)
			*link_status = resp->logical_link_status;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses asynchronous mcc; the completion is consumed in
 * be_async_cmd_process()
 */
int be_cmd_get_die_temperature(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_cntl_addnl_attribs *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
			       sizeof(*req), wrb, NULL);

	status = be_mcc_notify(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fat *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb,
			       NULL);
	req->fat_operation = cpu_to_le32(QUERY_FAT);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);

		if (log_size && resp->log_size)
			*log_size = le32_to_cpu(resp->log_size) -
					sizeof(u32);
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

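/* The FAT log is retrieved below in 60KB chunks, matching the DMA buffer
 * size allocated for the request; log_offset starts at sizeof(u32),
 * mirroring the u32 that be_cmd_get_reg_len() subtracts from the
 * reported log size.
 */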
int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
{
	struct be_dma_mem get_fat_cmd;
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fat *req;
	u32 offset = 0, total_size, buf_size,
				log_offset = sizeof(u32), payload_len;
	int status = 0;

	if (buf_len == 0)
		return -EIO;

	total_size = buf_len;

	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
	get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
					     get_fat_cmd.size,
					     &get_fat_cmd.dma, GFP_ATOMIC);
	if (!get_fat_cmd.va) {
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while reading FAT data\n");
		return -ENOMEM;
	}

	spin_lock_bh(&adapter->mcc_lock);

	while (total_size) {
		buf_size = min(total_size, (u32)60*1024);
		total_size -= buf_size;

		wrb = wrb_from_mccq(adapter);
		if (!wrb) {
			status = -EBUSY;
			goto err;
		}
		req = get_fat_cmd.va;

		payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
		be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				       OPCODE_COMMON_MANAGE_FAT, payload_len,
				       wrb, &get_fat_cmd);

		req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
		req->read_log_offset = cpu_to_le32(log_offset);
		req->read_log_length = cpu_to_le32(buf_size);
		req->data_buffer_size = cpu_to_le32(buf_size);

		status = be_mcc_notify_wait(adapter);
		if (!status) {
			struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;

			memcpy(buf + offset,
			       resp->data_buffer,
			       le32_to_cpu(resp->read_log_length));
		} else {
			dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
			goto err;
		}
		offset += buf_size;
		log_offset += buf_size;
	}
err:
	dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
			  get_fat_cmd.va, get_fat_cmd.dma);
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_get_fw_ver(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
			       NULL);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);

		strlcpy(adapter->fw_ver, resp->firmware_version_string,
			sizeof(adapter->fw_ver));
		strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
			sizeof(adapter->fw_on_flash));
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Set the EQ delay interval of the given EQs to the specified values.
 * Uses async mcc.
 */
static int __be_cmd_modify_eqd(struct be_adapter *adapter,
			       struct be_set_eqd *set_eqd, int num)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
	int status = 0, i;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
			       NULL);

	req->num_eq = cpu_to_le32(num);
	for (i = 0; i < num; i++) {
		req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
		req->set_eqd[i].phase = 0;
		req->set_eqd[i].delay_multiplier =
				cpu_to_le32(set_eqd[i].delay_multiplier);
	}

	status = be_mcc_notify(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

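/* Issues the EQ-delay updates in batches of 8, presumably the most
 * entries a single MODIFY_EQ_DELAY cmd can carry.
 */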
int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
		      int num)
{
	int num_eqs, i = 0;

	while (num) {
		num_eqs = min(num, 8);
		__be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
		i += num_eqs;
		num -= num_eqs;
	}

	return 0;
}

/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
		       u32 num, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
			       wrb, NULL);
	req->hdr.domain = domain;

	req->interface_id = if_id;
	req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
	req->num_vlan = num;
	memcpy(req->normal_vlan, vtag_array,
	       req->num_vlan * sizeof(vtag_array[0]));

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
	struct be_mcc_wrb *wrb;
	struct be_dma_mem *mem = &adapter->rx_filter;
	struct be_cmd_req_rx_filter *req = mem->va;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	memset(req, 0, sizeof(*req));
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
			       wrb, mem);

	req->if_id = cpu_to_le32(adapter->if_handle);
	req->if_flags_mask = cpu_to_le32(flags);
	req->if_flags = (value == ON) ? req->if_flags_mask : 0;

	if (flags & BE_IF_FLAGS_MULTICAST) {
		struct netdev_hw_addr *ha;
		int i = 0;

		/* Reset mcast promisc mode if already set by setting mask
		 * and not setting flags field
		 */
		req->if_flags_mask |=
			cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
				    be_if_cap_flags(adapter));
		req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
		netdev_for_each_mc_addr(ha, adapter->netdev)
			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
	}

	status = be_mcc_notify(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

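/* Masks off any rx filter flags the interface is not capable of
 * before issuing the cmd; a mismatch is only warned about.
 */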
int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
	struct device *dev = &adapter->pdev->dev;

	if ((flags & be_if_cap_flags(adapter)) != flags) {
		dev_warn(dev, "Cannot set rx filter flags 0x%x\n", flags);
		dev_warn(dev, "Interface is capable of 0x%x flags only\n",
			 be_if_cap_flags(adapter));
	}
	flags &= be_if_cap_flags(adapter);

	return __be_cmd_rx_filter(adapter, flags, value);
}

/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
			       wrb, NULL);

	req->hdr.version = 1;
	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);

	if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
		return -EOPNOTSUPP;

	return status;
}

/* Uses sync mcc */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
			       wrb, NULL);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);

		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses mbox */
int be_cmd_query_fw_cfg(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
			       sizeof(*req), wrb, NULL);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);

		adapter->port_num = le32_to_cpu(resp->phys_port);
		adapter->function_mode = le32_to_cpu(resp->function_mode);
		adapter->function_caps = le32_to_cpu(resp->function_caps);
		adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
		dev_info(&adapter->pdev->dev,
			 "FW config: function_mode=0x%x, function_caps=0x%x\n",
			 adapter->function_mode, adapter->function_caps);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox. On Lancer chips the function is instead reset by writing
 * SLI_PORT_CONTROL_IP_MASK to the SLIPORT control register.
 */
int be_cmd_reset_function(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;
	int status;

	if (lancer_chip(adapter)) {
		iowrite32(SLI_PORT_CONTROL_IP_MASK,
			  adapter->db + SLIPORT_CONTROL_OFFSET);
		status = lancer_wait_ready(adapter);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Adapter in non recoverable error\n");
		return status;
	}

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
			       NULL);

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

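/* Programs the RSS indirection table and hash key; quietly succeeds
 * when the interface does not support RSS.
 */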
int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
		      u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_rss_config *req;
	int status;

	if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);

	req->if_id = cpu_to_le32(adapter->if_handle);
	req->enable_rss = cpu_to_le16(rss_hash_opts);
	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);

	if (!BEx_chip(adapter))
		req->hdr.version = 1;

	memcpy(req->cpu_table, rsstable, table_size);
	memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
	be_dws_cpu_to_le(req->hash, sizeof(req->hash));

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
			    u8 bcn, u8 sts, u8 state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_enable_disable_beacon *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_ENABLE_DISABLE_BEACON,
			       sizeof(*req), wrb, NULL);

	req->port_num = port_num;
	req->beacon_state = state;
	req->beacon_duration = bcn;
	req->status_duration = sts;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_beacon_state *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
			       wrb, NULL);

	req->port_num = port_num;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_beacon_state *resp =
						embedded_payload(wrb);

		*state = resp->beacon_state;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
				      u8 page_num, u8 *data)
{
	struct be_dma_mem cmd;
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_port_type *req;
	int status;

	if (page_num > TR_PAGE_A2)
		return -EINVAL;

	cmd.size = sizeof(struct be_cmd_resp_port_type);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_ATOMIC);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
		return -ENOMEM;
	}

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_READ_TRANSRECV_DATA,
			       cmd.size, wrb, &cmd);

	req->port = cpu_to_le32(adapter->hba_port_num);
	req->page_num = cpu_to_le32(page_num);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_port_type *resp = cmd.va;

		memcpy(data, resp->page_data, PAGE_DATA_LEN);
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

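/* Writes one chunk of a flash object on Lancer. The cmd is posted
 * asynchronously; the result is picked up from adapter->flash_status
 * after waiting (up to 60s) on et_cmd_compl. A zero-length write
 * marks end-of-file.
 */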
int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
			    u32 data_size, u32 data_offset,
			    const char *obj_name, u32 *data_written,
			    u8 *change_status, u8 *addn_status)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_write_object *req;
	struct lancer_cmd_resp_write_object *resp;
	void *ctxt = NULL;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_WRITE_OBJECT,
			       sizeof(struct lancer_cmd_req_write_object), wrb,
			       NULL);

	ctxt = &req->context;
	AMAP_SET_BITS(struct amap_lancer_write_obj_context,
		      write_length, ctxt, data_size);

	if (data_size == 0)
		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
			      eof, ctxt, 1);
	else
		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
			      eof, ctxt, 0);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));
	req->write_offset = cpu_to_le32(data_offset);
	strlcpy(req->object_name, obj_name, sizeof(req->object_name));
	req->descriptor_count = cpu_to_le32(1);
	req->buf_len = cpu_to_le32(data_size);
	req->addr_low = cpu_to_le32((cmd->dma +
				     sizeof(struct lancer_cmd_req_write_object))
				    & 0xFFFFFFFF);
	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
				sizeof(struct lancer_cmd_req_write_object)));

	status = be_mcc_notify(adapter);
	if (status)
		goto err_unlock;

	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
					 msecs_to_jiffies(60000)))
		status = -ETIMEDOUT;
	else
		status = adapter->flash_status;

	resp = embedded_payload(wrb);
	if (!status) {
		*data_written = le32_to_cpu(resp->actual_write_len);
		*change_status = resp->change_status;
	} else {
		*addn_status = resp->additional_status;
	}

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_query_cable_type(struct be_adapter *adapter)
{
	u8 page_data[PAGE_DATA_LEN];
	int status;

	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
						   page_data);
	if (!status) {
		switch (adapter->phy.interface_type) {
		case PHY_TYPE_QSFP:
			adapter->phy.cable_type =
				page_data[QSFP_PLUS_CABLE_TYPE_OFFSET];
			break;
		case PHY_TYPE_SFP_PLUS_10GB:
			adapter->phy.cable_type =
				page_data[SFP_PLUS_CABLE_TYPE_OFFSET];
			break;
		default:
			adapter->phy.cable_type = 0;
			break;
		}
	}
	return status;
}

int be_cmd_query_sfp_info(struct be_adapter *adapter)
{
	u8 page_data[PAGE_DATA_LEN];
	int status;

	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
						   page_data);
	if (!status) {
		strlcpy(adapter->phy.vendor_name, page_data +
			SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
		strlcpy(adapter->phy.vendor_pn,
			page_data + SFP_VENDOR_PN_OFFSET,
			SFP_VENDOR_NAME_LEN - 1);
	}

	return status;
}

int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name)
{
	struct lancer_cmd_req_delete_object *req;
	struct be_mcc_wrb *wrb;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_DELETE_OBJECT,
			       sizeof(*req), wrb, NULL);

	strlcpy(req->object_name, obj_name, sizeof(req->object_name));

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
			   u32 data_size, u32 data_offset, const char *obj_name,
			   u32 *data_read, u32 *eof, u8 *addn_status)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_read_object *req;
	struct lancer_cmd_resp_read_object *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_READ_OBJECT,
			       sizeof(struct lancer_cmd_req_read_object), wrb,
			       NULL);

	req->desired_read_len = cpu_to_le32(data_size);
	req->read_offset = cpu_to_le32(data_offset);
	/* Bounded copy, consistent with the write/delete variants */
	strlcpy(req->object_name, obj_name, sizeof(req->object_name));
	req->descriptor_count = cpu_to_le32(1);
	req->buf_len = cpu_to_le32(data_size);
	req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));

	status = be_mcc_notify_wait(adapter);

	resp = embedded_payload(wrb);
	if (!status) {
		*data_read = le32_to_cpu(resp->actual_read_len);
		*eof = le32_to_cpu(resp->eof);
	} else {
		*addn_status = resp->additional_status;
	}

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

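/* Flashes one segment of the firmware image. Like the Lancer write
 * path, the cmd completes asynchronously via et_cmd_compl, here with
 * a 40s timeout, and the result is read from adapter->flash_status.
 */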
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			  u32 flash_type, u32 flash_opcode, u32 img_offset,
			  u32 buf_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}
	req = cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
			       cmd);

	req->params.op_type = cpu_to_le32(flash_type);
	if (flash_type == OPTYPE_OFFSET_SPECIFIED)
		req->params.offset = cpu_to_le32(img_offset);

	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	status = be_mcc_notify(adapter);
	if (status)
		goto err_unlock;

	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
					 msecs_to_jiffies(40000)))
		status = -ETIMEDOUT;
	else
		status = adapter->flash_status;

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
			 u16 img_optype, u32 img_offset, u32 crc_offset)
{
	struct be_cmd_read_flash_crc *req;
	struct be_mcc_wrb *wrb;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
			       wrb, NULL);

	req->params.op_type = cpu_to_le32(img_optype);
	if (img_optype == OPTYPE_OFFSET_SPECIFIED)
		req->params.offset = cpu_to_le32(img_offset + crc_offset);
	else
		req->params.offset = cpu_to_le32(crc_offset);

	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
	req->params.data_buf_size = cpu_to_le32(0x4);

	status = be_mcc_notify_wait(adapter);
	if (!status)
		memcpy(flashed_crc, req->crc, 4);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
			    struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
			       wrb, nonemb_cmd);
	memcpy(req->magic_mac, mac, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

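/* Puts the given port into (or takes it out of) the requested
 * loopback mode; completion is signalled asynchronously and the wait
 * is bounded by SET_LB_MODE_TIMEOUT.
 */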
int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
			u8 loopback_type, u8 enable)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_lmode *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			       OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
			       wrb, NULL);

	req->src_port = port_num;
	req->dest_port = port_num;
	req->loopback_type = loopback_type;
	req->loopback_state = enable;

	status = be_mcc_notify(adapter);
	if (status)
		goto err_unlock;

	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
					 msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
		status = -ETIMEDOUT;

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
			 u32 loopback_type, u32 pkt_size, u32 num_pkts,
			 u64 pattern)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_loopback_test *req;
	struct be_cmd_resp_loopback_test *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			       OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
			       NULL);

	req->hdr.timeout = cpu_to_le32(15);
	req->pattern = cpu_to_le64(pattern);
	req->src_port = cpu_to_le32(port_num);
	req->dest_port = cpu_to_le32(port_num);
	req->pkt_size = cpu_to_le32(pkt_size);
	req->num_pkts = cpu_to_le32(num_pkts);
	req->loopback_type = cpu_to_le32(loopback_type);

	status = be_mcc_notify(adapter);
	if (status)
		goto err;

	spin_unlock_bh(&adapter->mcc_lock);

	wait_for_completion(&adapter->et_cmd_compl);
	resp = embedded_payload(wrb);
	status = le32_to_cpu(resp->status);

	return status;
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

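/* DMA-writes a repeating byte pattern to adapter DDR and reads it
 * back; fails if the echoed buffer differs or a send error is
 * reported.
 */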
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
			u32 byte_cnt, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_ddrdma_test *req;
	int status;
	int i, j = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = cmd->va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			       OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
			       cmd);

	req->pattern = cpu_to_le64(pattern);
	req->byte_count = cpu_to_le32(byte_cnt);
	for (i = 0; i < byte_cnt; i++) {
		req->snd_buff[i] = (u8)(pattern >> (j*8));
		j++;
		if (j > 7)
			j = 0;
	}

	status = be_mcc_notify_wait(adapter);

	if (!status) {
		struct be_cmd_resp_ddrdma_test *resp;

		resp = cmd->va;
		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
		    resp->snd_err) {
			status = -1;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_get_seeprom_data(struct be_adapter *adapter,
			    struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_seeprom_read *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
			       nonemb_cmd);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_get_phy_info(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_phy_info *req;
	struct be_dma_mem cmd;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	cmd.size = sizeof(struct be_cmd_req_get_phy_info);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_ATOMIC);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
		status = -ENOMEM;
		goto err;
	}

	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
			       wrb, &cmd);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_phy_info *resp_phy_info =
				cmd.va + sizeof(struct be_cmd_req_hdr);

		adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
		adapter->phy.interface_type =
			le16_to_cpu(resp_phy_info->interface_type);
		adapter->phy.auto_speeds_supported =
			le16_to_cpu(resp_phy_info->auto_speeds_supported);
		adapter->phy.fixed_speeds_supported =
			le16_to_cpu(resp_phy_info->fixed_speeds_supported);
		adapter->phy.misc_params =
			le32_to_cpu(resp_phy_info->misc_params);

		if (BE2_chip(adapter)) {
			adapter->phy.fixed_speeds_supported =
				BE_SUPPORTED_SPEED_10GBPS |
				BE_SUPPORTED_SPEED_1GBPS;
		}
	}
	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_qos *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);

	req->hdr.domain = domain;
	req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
	req->max_bps_nic = cpu_to_le32(bps);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cntl_attribs *req;
	struct be_cmd_resp_cntl_attribs *resp;
	int status;
	int payload_len = max(sizeof(*req), sizeof(*resp));
	struct mgmt_controller_attrib *attribs;
	struct be_dma_mem attribs_cmd;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
	attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
					     attribs_cmd.size,
					     &attribs_cmd.dma, GFP_ATOMIC);
	if (!attribs_cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
		status = -ENOMEM;
		goto err;
	}

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = attribs_cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
			       wrb, &attribs_cmd);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
		adapter->hba_port_num = attribs->hba_attribs.phy_port;
	}

err:
	mutex_unlock(&adapter->mbox_lock);
	if (attribs_cmd.va)
		dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size,
				  attribs_cmd.va, attribs_cmd.dma);
	return status;
}

/* Uses mbox */
int be_cmd_req_native_mode(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_func_cap *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
			       sizeof(*req), wrb, NULL);

	req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
				CAPABILITY_BE3_NATIVE_ERX_API);
	req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);

		adapter->be3_native = le32_to_cpu(resp->cap_flags) &
					CAPABILITY_BE3_NATIVE_ERX_API;
		if (!adapter->be3_native)
			dev_warn(&adapter->pdev->dev,
				 "adapter not in advanced mode\n");
	}
err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Get privilege(s) for a function */
int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
			     u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fn_privileges *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = domain;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fn_privileges *resp =
						embedded_payload(wrb);

		*privilege = le32_to_cpu(resp->privilege_mask);

		/* In UMC mode FW does not return right privileges.
		 * Override with correct privilege equivalent to PF.
		 */
		if (BEx_chip(adapter) && be_is_mc(adapter) &&
		    be_physfn(adapter))
			*privilege = MAX_PRIVILEGES;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Set privilege(s) for a function */
int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
			     u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_fn_privileges *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
			       wrb, NULL);
	req->hdr.domain = domain;
	if (lancer_chip(adapter))
		req->privileges_lancer = cpu_to_le32(privileges);
	else
		req->privileges = cpu_to_le32(privileges);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* pmac_id_valid: true => pmac_id is supplied and MAC address is requested.
 * pmac_id_valid: false => pmac_id or MAC address is requested.
 *		  If pmac_id is returned, pmac_id_valid is returned as true
 */
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
			     bool *pmac_id_valid, u32 *pmac_id, u32 if_handle,
			     u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_mac_list *req;
	int status;
	int mac_count;
	struct be_dma_mem get_mac_list_cmd;
	int i;

	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
	get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
						  get_mac_list_cmd.size,
						  &get_mac_list_cmd.dma,
						  GFP_ATOMIC);

	if (!get_mac_list_cmd.va) {
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure during GET_MAC_LIST\n");
		return -ENOMEM;
	}

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto out;
	}

	req = get_mac_list_cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_MAC_LIST,
			       get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
	req->hdr.domain = domain;
	req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
	if (*pmac_id_valid) {
		req->mac_id = cpu_to_le32(*pmac_id);
		req->iface_id = cpu_to_le16(if_handle);
		req->perm_override = 0;
	} else {
		req->perm_override = 1;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_mac_list *resp =
						get_mac_list_cmd.va;

		if (*pmac_id_valid) {
			memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
			       ETH_ALEN);
			goto out;
		}

		mac_count = resp->true_mac_count + resp->pseudo_mac_count;
		/* Mac list returned could contain one or more active mac_ids
		 * or one or more true or pseudo permanent mac addresses.
		 * If an active mac_id is present, return first active mac_id
		 * found.
		 */
		for (i = 0; i < mac_count; i++) {
			struct get_list_macaddr *mac_entry;
			u16 mac_addr_size;
			u32 mac_id;

			mac_entry = &resp->macaddr_list[i];
			mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
			/* mac_id is a 32 bit value and mac_addr size
			 * is 6 bytes
			 */
			if (mac_addr_size == sizeof(u32)) {
				*pmac_id_valid = true;
				mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
				*pmac_id = le32_to_cpu(mac_id);
				goto out;
			}
		}
		/* If no active mac_id found, return first mac addr */
		*pmac_id_valid = false;
		memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
		       ETH_ALEN);
	}

out:
	spin_unlock_bh(&adapter->mcc_lock);
	dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
			  get_mac_list_cmd.va, get_mac_list_cmd.dma);
	return status;
}

int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
			  u8 *mac, u32 if_handle, bool active, u32 domain)
{
	if (!active)
		be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id,
					 if_handle, domain);
	if (BEx_chip(adapter))
		return be_cmd_mac_addr_query(adapter, mac, false,
					     if_handle, curr_pmac_id);
	else
		/* Fetch the MAC address using pmac_id */
		return be_cmd_get_mac_from_list(adapter, mac, &active,
						&curr_pmac_id,
						if_handle, domain);
}

int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
{
	int status;
	bool pmac_valid = false;

	eth_zero_addr(mac);

	if (BEx_chip(adapter)) {
		if (be_physfn(adapter))
			status = be_cmd_mac_addr_query(adapter, mac, true, 0,
						       0);
		else
			status = be_cmd_mac_addr_query(adapter, mac, false,
						       adapter->if_handle, 0);
	} else {
		status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
						  NULL, adapter->if_handle, 0);
	}

	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
			u8 mac_count, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_mac_list *req;
	int status;
	struct be_dma_mem cmd;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_req_set_mac_list);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_KERNEL);
	if (!cmd.va)
		return -ENOMEM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd.va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
			       wrb, &cmd);

	req->hdr.domain = domain;
	req->mac_count = mac_count;
	if (mac_count)
		memcpy(req->mac, mac_array, ETH_ALEN*mac_count);

	status = be_mcc_notify_wait(adapter);

err:
	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Wrapper to delete any active MACs and provision the new mac.
 * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
 * current list are active.
 */
int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
{
	bool active_mac = false;
	u8 old_mac[ETH_ALEN];
	u32 pmac_id;
	int status;

	status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
					  &pmac_id, if_id, dom);

	if (!status && active_mac)
		be_cmd_pmac_del(adapter, if_id, pmac_id, dom);

	return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
}

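/* Programs hyper-switch settings (PVID, port forwarding mode and
 * MAC/VLAN spoof checking) for the given interface/domain.
 */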
int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
			  u32 domain, u16 intf_id, u16 hsw_mode, u8 spoofchk)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_hsw_config *req;
	void *ctxt;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
			       NULL);

	req->hdr.domain = domain;
	AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
	if (pvid) {
		AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
	}
	if (!BEx_chip(adapter) && hsw_mode) {
		AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
			      ctxt, adapter->hba_port_num);
		AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
		AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
			      ctxt, hsw_mode);
	}

	/* Enable/disable both mac and vlan spoof checking */
	if (!BEx_chip(adapter) && spoofchk) {
		AMAP_SET_BITS(struct amap_set_hsw_context, mac_spoofchk,
			      ctxt, spoofchk);
		AMAP_SET_BITS(struct amap_set_hsw_context, vlan_spoofchk,
			      ctxt, spoofchk);
	}

	be_dws_cpu_to_le(req->context, sizeof(req->context));
	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Get Hyper switch config */
int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
3269
			  u32 domain, u16 intf_id, u8 *mode, bool *spoofchk)
3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_hsw_config *req;
	void *ctxt;
	int status;
	u16 vid;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3289 3290
			       OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
			       NULL);
3291 3292

	req->hdr.domain = domain;
3293 3294
	AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
		      ctxt, intf_id);
3295
	AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
3296

3297
	if (!BEx_chip(adapter) && mode) {
3298 3299 3300 3301
		AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
			      ctxt, adapter->hba_port_num);
		AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
	}
3302 3303 3304 3305 3306 3307
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_hsw_config *resp =
						embedded_payload(wrb);
3308

3309
		be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
3310
		vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3311
				    pvid, &resp->context);
3312 3313 3314 3315 3316
		if (pvid)
			*pvid = le16_to_cpu(vid);
		if (mode)
			*mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
					      port_fwd_type, &resp->context);
3317 3318 3319 3320
		if (spoofchk)
			*spoofchk =
				AMAP_GET_BITS(struct amap_get_hsw_resp_context,
					      spoofchk, &resp->context);
3321 3322 3323 3324 3325 3326 3327
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

static bool be_is_wol_excluded(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	if (be_virtfn(adapter))
		return true;

	switch (pdev->subsystem_device) {
	case OC_SUBSYS_DEVICE_ID1:
	case OC_SUBSYS_DEVICE_ID2:
	case OC_SUBSYS_DEVICE_ID3:
	case OC_SUBSYS_DEVICE_ID4:
		return true;
	default:
		return false;
	}
}

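/* Queries the adapter's wake-on-LAN settings. Skipped on functions
 * where WoL is excluded (VFs and certain OEM subsystem ids).
 */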
int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config_v1 *req;
	int status = 0;
	struct be_dma_mem cmd;

	if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
			    CMD_SUBSYSTEM_ETH))
		return -EPERM;

	if (be_is_wol_excluded(adapter))
		return status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_ATOMIC);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
		status = -ENOMEM;
		goto err;
	}

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
			       sizeof(*req), wrb, &cmd);

	req->hdr.version = 1;
	req->query_options = BE_GET_WOL_CAP;

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;

		resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va;

		adapter->wol_cap = resp->wol_settings;
		if (adapter->wol_cap & BE_WOL_CAP)
			adapter->wol_en = true;
	}
err:
	mutex_unlock(&adapter->mbox_lock);
	if (cmd.va)
		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
				  cmd.dma);
	return status;
}
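
/* Sets the UART trace level in every FAT module's config and writes
 * the updated capabilities back to the firmware.
 */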
int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	int i, j;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
					    extfat_cmd.size, &extfat_cmd.dma,
					    GFP_ATOMIC);
	if (!extfat_cmd.va)
		return -ENOMEM;

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (status)
		goto err;

	cfgs = (struct be_fat_conf_params *)
			(extfat_cmd.va + sizeof(struct be_cmd_resp_hdr));
	for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
		u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);

		for (j = 0; j < num_modes; j++) {
			if (cfgs->module[i].trace_lvl[j].mode == MODE_UART)
				cfgs->module[i].trace_lvl[j].dbg_lvl =
							cpu_to_le32(level);
		}
	}

	status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
err:
	dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
			  extfat_cmd.dma);
	return status;
}

int be_cmd_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status, j;
	int level = 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
					    extfat_cmd.size, &extfat_cmd.dma,
					    GFP_ATOMIC);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));

		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
			  extfat_cmd.dma);
err:
	return level;
}

int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
				   struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_ext_fat_caps *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd->va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
			       cmd->size, wrb, cmd);
	req->parameter_type = cpu_to_le32(1);

	status = be_mbox_notify_wait(adapter);
err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
				   struct be_dma_mem *cmd,
				   struct be_fat_conf_params *configs)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_ext_fat_caps *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd->va;
	memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
			       cmd->size, wrb, cmd);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

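/* Resolves the adapter's port name; falls back to the ASCII digit of
 * the port number if the query fails.
 */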
int be_cmd_query_port_name(struct be_adapter *adapter)
{
	struct be_cmd_req_get_port_name *req;
	struct be_mcc_wrb *wrb;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
			       NULL);
	if (!BEx_chip(adapter))
		req->hdr.version = 1;

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);

		adapter->port_name = resp->port_name[adapter->hba_port_num];
	} else {
		adapter->port_name = adapter->hba_port_num + '0';
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

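/* Helpers that walk the variable-length resource-descriptor lists
 * returned by GET_FUNC_CONFIG/GET_PROFILE_CONFIG. Each descriptor
 * carries its own length; a zero desc_len is taken to mean
 * RESOURCE_DESC_SIZE_V0.
 */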
/* Descriptor type */
enum {
	FUNC_DESC = 1,
	VFT_DESC = 2
};

static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
					       int desc_type)
{
	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
	struct be_nic_res_desc *nic;
	int i;

	for (i = 0; i < desc_count; i++) {
		if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
		    hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) {
			nic = (struct be_nic_res_desc *)hdr;
			if (desc_type == FUNC_DESC ||
			    (desc_type == VFT_DESC &&
			     nic->flags & (1 << VFT_SHIFT)))
				return nic;
		}

		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
		hdr = (void *)hdr + hdr->desc_len;
	}
	return NULL;
}

static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count)
{
	return be_get_nic_desc(buf, desc_count, VFT_DESC);
}

static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count)
{
	return be_get_nic_desc(buf, desc_count, FUNC_DESC);
}

static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
						 u32 desc_count)
{
	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
	struct be_pcie_res_desc *pcie;
	int i;

	for (i = 0; i < desc_count; i++) {
		if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
		     hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) {
			pcie = (struct be_pcie_res_desc *)hdr;
			if (pcie->pf_num == devfn)
				return pcie;
		}

		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
		hdr = (void *)hdr + hdr->desc_len;
	}

	return NULL;
}

static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
{
	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
	int i;

	for (i = 0; i < desc_count; i++) {
		if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
			return (struct be_port_res_desc *)hdr;

		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
		hdr = (void *)hdr + hdr->desc_len;
	}
	return NULL;
}

static void be_copy_nic_desc(struct be_resources *res,
			     struct be_nic_res_desc *desc)
{
	res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
	res->max_vlans = le16_to_cpu(desc->vlan_count);
	res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
	res->max_tx_qs = le16_to_cpu(desc->txq_count);
	res->max_rss_qs = le16_to_cpu(desc->rssq_count);
	res->max_rx_qs = le16_to_cpu(desc->rq_count);
	res->max_evt_qs = le16_to_cpu(desc->eq_count);
	res->max_cq_count = le16_to_cpu(desc->cq_count);
	res->max_iface_count = le16_to_cpu(desc->iface_count);
	res->max_mcc_count = le16_to_cpu(desc->mcc_count);
	/* Clear flags that driver is not interested in */
	res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
				BE_IF_CAP_FLAGS_WANT;
}

/* Uses Mbox */
int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_func_config *req;
	int status;
	struct be_dma_mem cmd;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_resp_get_func_config);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_ATOMIC);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
		status = -ENOMEM;
		goto err;
	}

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FUNC_CONFIG,
			       cmd.size, wrb, &cmd);

	if (skyhawk_chip(adapter))
		req->hdr.version = 1;

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_func_config *resp = cmd.va;
		u32 desc_count = le32_to_cpu(resp->desc_count);
		struct be_nic_res_desc *desc;

		desc = be_get_func_nic_desc(resp->func_param, desc_count);
		if (!desc) {
			status = -EINVAL;
			goto err;
		}

		adapter->pf_number = desc->pf_num;
		be_copy_nic_desc(res, desc);
	}
err:
	mutex_unlock(&adapter->mbox_lock);
	if (cmd.va)
		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
				  cmd.dma);
	return status;
}
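
/* A minimal usage sketch for be_cmd_get_func_config() above (hypothetical
 * caller, error handling elided):
 *
 *	struct be_resources res = {0};
 *
 *	if (!be_cmd_get_func_config(adapter, &res))
 *		dev_info(&adapter->pdev->dev, "TXQs %u, RSSQs %u, VLANs %u\n",
 *			 res.max_tx_qs, res.max_rss_qs, res.max_vlans);
 */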

/* Will use MBOX only if MCCQ has not been created */
int be_cmd_get_profile_config(struct be_adapter *adapter,
			      struct be_resources *res, u8 query, u8 domain)
{
	struct be_cmd_resp_get_profile_config *resp;
	struct be_cmd_req_get_profile_config *req;
	struct be_nic_res_desc *vf_res;
	struct be_pcie_res_desc *pcie;
	struct be_port_res_desc *port;
	struct be_nic_res_desc *nic;
	struct be_mcc_wrb wrb = {0};
	struct be_dma_mem cmd;
	u16 desc_count;
	int status;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_ATOMIC);
	if (!cmd.va)
		return -ENOMEM;

	req = cmd.va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_PROFILE_CONFIG,
			       cmd.size, &wrb, &cmd);

	req->hdr.domain = domain;
	if (!lancer_chip(adapter))
		req->hdr.version = 1;
	req->type = ACTIVE_PROFILE_TYPE;

	/* When the QUERY_MODIFIABLE_FIELDS_TYPE bit is set, the cmd returns
	 * descriptors with all bits set to "1" for the fields that can be
	 * modified using the SET_PROFILE_CONFIG cmd.
	 */
	if (query == RESOURCE_MODIFIABLE)
		req->type |= QUERY_MODIFIABLE_FIELDS_TYPE;

	status = be_cmd_notify_wait(adapter, &wrb);
	if (status)
		goto err;

	resp = cmd.va;
	desc_count = le16_to_cpu(resp->desc_count);

	pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
				desc_count);
	if (pcie)
		res->max_vfs = le16_to_cpu(pcie->num_vfs);

	port = be_get_port_desc(resp->func_param, desc_count);
	if (port)
		adapter->mc_type = port->mc_type;

	nic = be_get_func_nic_desc(resp->func_param, desc_count);
	if (nic)
		be_copy_nic_desc(res, nic);

	vf_res = be_get_vft_desc(resp->func_param, desc_count);
	if (vf_res)
		res->vf_if_cap_flags = vf_res->cap_flags;
err:
	if (cmd.va)
		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
				  cmd.dma);
	return status;
}
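
/* A minimal usage sketch for be_cmd_get_profile_config() above, mirroring
 * the RESOURCE_MODIFIABLE query done in be_fill_vf_res_template() below;
 * a field that reads as FIELD_MODIFIABLE in the result may be changed via
 * SET_PROFILE_CONFIG (hypothetical caller):
 *
 *	struct be_resources res_mod = {0};
 *	bool vlan_limit_is_tunable;
 *
 *	be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0);
 *	vlan_limit_is_tunable = (res_mod.max_vlans == FIELD_MODIFIABLE);
 */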

/* Will use MBOX only if MCCQ has not been created */
static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
				     int size, int count, u8 version, u8 domain)
{
	struct be_cmd_req_set_profile_config *req;
	struct be_mcc_wrb wrb = {0};
	struct be_dma_mem cmd;
	int status;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_req_set_profile_config);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_ATOMIC);
	if (!cmd.va)
		return -ENOMEM;

	req = cmd.va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size,
			       &wrb, &cmd);
	req->hdr.version = version;
	req->hdr.domain = domain;
	req->desc_count = cpu_to_le32(count);
	memcpy(req->desc, desc, size);

	status = be_cmd_notify_wait(adapter, &wrb);

	if (cmd.va)
		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
				  cmd.dma);
	return status;
}

/* Mark all fields invalid */
static void be_reset_nic_desc(struct be_nic_res_desc *nic)
{
	memset(nic, 0, sizeof(*nic));
	nic->unicast_mac_count = 0xFFFF;
	nic->mcc_count = 0xFFFF;
	nic->vlan_count = 0xFFFF;
	nic->mcast_mac_count = 0xFFFF;
	nic->txq_count = 0xFFFF;
	nic->rq_count = 0xFFFF;
	nic->rssq_count = 0xFFFF;
	nic->lro_count = 0xFFFF;
	nic->cq_count = 0xFFFF;
	nic->toe_conn_count = 0xFFFF;
	nic->eq_count = 0xFFFF;
	nic->iface_count = 0xFFFF;
	nic->link_param = 0xFF;
	nic->channel_id_param = cpu_to_le16(0xF000);
	nic->acpi_params = 0xFF;
	nic->wol_param = 0x0F;
	nic->tunnel_iface_count = 0xFFFF;
	nic->direct_tenant_iface_count = 0xFFFF;
	nic->bw_min = 0xFFFFFFFF;
	nic->bw_max = 0xFFFFFFFF;
}

/* Mark all fields invalid */
static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie)
{
	memset(pcie, 0, sizeof(*pcie));
	pcie->sriov_state = 0xFF;
	pcie->pf_state = 0xFF;
	pcie->pf_type = 0xFF;
	pcie->num_vfs = 0xFFFF;
}

int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
		      u8 domain)
{
	struct be_nic_res_desc nic_desc;
	u32 bw_percent;
	u16 version = 0;

	if (BE3_chip(adapter))
		return be_cmd_set_qos(adapter, max_rate / 10, domain);

	be_reset_nic_desc(&nic_desc);
	nic_desc.pf_num = adapter->pf_number;
	nic_desc.vf_num = domain;
	nic_desc.bw_min = 0;
	if (lancer_chip(adapter)) {
		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
		nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
					(1 << NOSV_SHIFT);
		nic_desc.bw_max = cpu_to_le32(max_rate / 10);
	} else {
		version = 1;
		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
		nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
		bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
		nic_desc.bw_max = cpu_to_le32(bw_percent);
	}

	return be_cmd_set_profile_config(adapter, &nic_desc,
					 nic_desc.hdr.desc_len,
					 1, version, domain);
}
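
/* Worked example for the rate conversion above (values for illustration
 * only): with max_rate = 5000 (Mbps) on a Lancer chip, bw_max becomes
 * 5000 / 10 = 500, i.e. 500 units of 10 Mbps.  On other chips, with
 * link_speed = 10000, bw_percent = (5000 * 100) / 10000 = 50, capping the
 * function at 50% of the link speed.
 */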

static void be_fill_vf_res_template(struct be_adapter *adapter,
				    struct be_resources pool_res,
				    u16 num_vfs, u16 num_vf_qs,
				    struct be_nic_res_desc *nic_vft)
{
	u32 vf_if_cap_flags = pool_res.vf_if_cap_flags;
	struct be_resources res_mod = {0};

	/* Get the resource descriptor in which GET_PROFILE_CONFIG sets to
	 * all '1's every field that is modifiable using the
	 * SET_PROFILE_CONFIG cmd.
	 */
	be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0);

	/* If the RSS IFACE capability flags are modifiable for a VF, mark the
	 * capability flags as valid and set the RSS and DEFQ_RSS IFACE flags
	 * when more than 1 RSSQ is available for a VF.
	 * Otherwise, provision only 1 queue pair for the VF.
	 */
	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
		nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
		if (num_vf_qs > 1) {
			vf_if_cap_flags |= BE_IF_FLAGS_RSS;
			if (pool_res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
				vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
		} else {
			vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
					     BE_IF_FLAGS_DEFQ_RSS);
		}

		nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
	} else {
		num_vf_qs = 1;
	}

	nic_vft->rq_count = cpu_to_le16(num_vf_qs);
	nic_vft->txq_count = cpu_to_le16(num_vf_qs);
	nic_vft->rssq_count = cpu_to_le16(num_vf_qs);
	nic_vft->cq_count = cpu_to_le16(pool_res.max_cq_count /
					(num_vfs + 1));

	/* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
	 * among the PF and its VFs, if the fields are changeable
	 */
	if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
		nic_vft->unicast_mac_count = cpu_to_le16(pool_res.max_uc_mac /
							 (num_vfs + 1));

	if (res_mod.max_vlans == FIELD_MODIFIABLE)
		nic_vft->vlan_count = cpu_to_le16(pool_res.max_vlans /
						  (num_vfs + 1));

	if (res_mod.max_iface_count == FIELD_MODIFIABLE)
		nic_vft->iface_count = cpu_to_le16(pool_res.max_iface_count /
						   (num_vfs + 1));

	if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
		nic_vft->mcc_count = cpu_to_le16(pool_res.max_mcc_count /
						 (num_vfs + 1));
}
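
/* Worked example for the pool distribution above (values for illustration
 * only): with num_vfs = 15 and pool_res.max_cq_count = 256, each function
 * (the PF plus 15 VFs) gets 256 / (15 + 1) = 16 CQs; likewise a pool of
 * 64 unicast MACs yields 4 per function when that field is modifiable.
 */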

int be_cmd_set_sriov_config(struct be_adapter *adapter,
			    struct be_resources pool_res, u16 num_vfs,
			    u16 num_vf_qs)
{
	struct {
		struct be_pcie_res_desc pcie;
		struct be_nic_res_desc nic_vft;
	} __packed desc;

	/* PF PCIE descriptor */
	be_reset_pcie_desc(&desc.pcie);
	desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1;
	desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
	desc.pcie.flags = BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
	desc.pcie.pf_num = adapter->pdev->devfn;
	desc.pcie.sriov_state = num_vfs ? 1 : 0;
	desc.pcie.num_vfs = cpu_to_le16(num_vfs);

	/* VF NIC Template descriptor */
	be_reset_nic_desc(&desc.nic_vft);
	desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
	desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
	desc.nic_vft.flags = BIT(VFT_SHIFT) | BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
	desc.nic_vft.pf_num = adapter->pdev->devfn;
	desc.nic_vft.vf_num = 0;

	be_fill_vf_res_template(adapter, pool_res, num_vfs, num_vf_qs,
				&desc.nic_vft);

	return be_cmd_set_profile_config(adapter, &desc,
					 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
}
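
/* A minimal usage sketch for be_cmd_set_sriov_config() above (hypothetical
 * caller, with pool_res previously filled in by be_cmd_get_profile_config()):
 *
 *	be_cmd_set_sriov_config(adapter, pool_res, num_vfs, num_vf_qs);
 *
 * FW receives one PCIe descriptor (SR-IOV state and VF count) plus one VF
 * NIC template that applies to all VFs of this PF.
 */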

int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_manage_iface_filters *req;
	int status;

	if (iface == 0xFFFFFFFF)
		return -1;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req),
			       wrb, NULL);
	req->op = op;
	req->target_iface_id = cpu_to_le32(iface);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
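
/* A minimal usage sketch for be_cmd_manage_iface() above (hypothetical
 * caller; OP_CONVERT_NORMAL_TO_TUNNEL is assumed to be one of the op
 * values defined in be_cmds.h):
 *
 *	be_cmd_manage_iface(adapter, adapter->if_handle,
 *			    OP_CONVERT_NORMAL_TO_TUNNEL);
 */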

int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
{
	struct be_port_res_desc port_desc;

	memset(&port_desc, 0, sizeof(port_desc));
	port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1;
	port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
	port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
	port_desc.link_num = adapter->hba_port_num;
	if (port) {
		port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) |
					(1 << RCVID_SHIFT);
		port_desc.nv_port = swab16(port);
	} else {
		port_desc.nv_flags = NV_TYPE_DISABLED;
		port_desc.nv_port = 0;
	}

	return be_cmd_set_profile_config(adapter, &port_desc,
					 RESOURCE_DESC_SIZE_V1, 1, 1, 0);
}
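
/* A minimal usage sketch for be_cmd_set_vxlan_port() above; 4789 is the
 * IANA-assigned VXLAN UDP port and is used here for illustration only:
 *
 *	be_cmd_set_vxlan_port(adapter, cpu_to_be16(4789));	(enable)
 *	be_cmd_set_vxlan_port(adapter, 0);			(disable)
 */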

int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
		     int vf_num)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_iface_list *req;
	struct be_cmd_resp_get_iface_list *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
			       wrb, NULL);
	req->hdr.domain = vf_num + 1;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		resp = (struct be_cmd_resp_get_iface_list *)req;
		vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

static int lancer_wait_idle(struct be_adapter *adapter)
{
#define SLIPORT_IDLE_TIMEOUT 30
	u32 reg_val;
	int status = 0, i;

	for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
		reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
		if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
			break;

		ssleep(1);
	}

	if (i == SLIPORT_IDLE_TIMEOUT)
		status = -1;

	return status;
}

int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
{
	int status = 0;

	status = lancer_wait_idle(adapter);
	if (status)
		return status;

	iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);

	return status;
}

/* Check whether a FW dump image is present */
bool dump_present(struct be_adapter *adapter)
{
	u32 sliport_status = 0;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
	return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
}

int lancer_initiate_dump(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (dump_present(adapter)) {
		dev_info(dev, "Previous dump not cleared, not forcing dump\n");
		return -EEXIST;
	}

	/* Trigger a firmware reset and a diagnostic dump */
	status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
				     PHYSDEV_CONTROL_DD_MASK);
	if (status < 0) {
		dev_err(dev, "FW reset failed\n");
		return status;
	}

	status = lancer_wait_idle(adapter);
	if (status)
		return status;

	if (!dump_present(adapter)) {
		dev_err(dev, "FW dump not generated\n");
		return -EIO;
	}

	return 0;
}

int lancer_delete_dump(struct be_adapter *adapter)
{
	int status;

	status = lancer_cmd_delete_object(adapter, LANCER_FW_DUMP_FILE);
	return be_cmd_status(status);
}
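
/* Typical dump life cycle (a sketch, not a fixed sequence):
 * lancer_initiate_dump() triggers the FW dump, dump_present() confirms it,
 * user space retrieves it (e.g. via the ethtool dump interface), and
 * lancer_delete_dump() clears LANCER_FW_DUMP_FILE so a new dump can be
 * taken.
 */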

/* Uses sync mcc */
int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_enable_disable_vf *req;
	int status;

	if (BEx_chip(adapter))
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = domain;
	req->enable = 1;
	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_intr_set *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
			       wrb, NULL);

	req->intr_enabled = intr_enable;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses MBOX */
int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id)
{
	struct be_cmd_req_get_active_profile *req;
	struct be_mcc_wrb *wrb;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req),
			       wrb, NULL);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_active_profile *resp =
							embedded_payload(wrb);

		*profile_id = le16_to_cpu(resp->active_profile_id);
	}

err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}
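
/* A minimal usage sketch for be_cmd_get_active_profile() above
 * (hypothetical caller):
 *
 *	u16 profile_id;
 *
 *	if (!be_cmd_get_active_profile(adapter, &profile_id))
 *		dev_info(&adapter->pdev->dev, "profile %u\n", profile_id);
 */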

int be_cmd_set_logical_link_config(struct be_adapter *adapter,
				   int link_state, u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_ll_link *req;
	int status;

	if (BEx_chip(adapter) || lancer_chip(adapter))
		return -EOPNOTSUPP;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG,
			       sizeof(*req), wrb, NULL);

	req->hdr.version = 1;
	req->hdr.domain = domain;

	if (link_state == IFLA_VF_LINK_STATE_ENABLE)
		req->link_config |= 1;

	if (link_state == IFLA_VF_LINK_STATE_AUTO)
		req->link_config |= 1 << PLINK_TRACK_SHIFT;

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
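
/* A minimal usage sketch for be_cmd_set_logical_link_config() above; as in
 * be_cmd_get_if_id(), domain 'vf + 1' addresses a VF (hypothetical caller,
 * e.g. from an ndo_set_vf_link_state handler):
 *
 *	be_cmd_set_logical_link_config(adapter, IFLA_VF_LINK_STATE_AUTO,
 *				       vf + 1);
 */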

int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
		    int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
	struct be_adapter *adapter = netdev_priv(netdev_handle);
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)wrb_payload;
	struct be_cmd_req_hdr *req;
	struct be_cmd_resp_hdr *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);
	resp = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
			       hdr->opcode, wrb_payload_size, wrb, NULL);
	memcpy(req, wrb_payload, wrb_payload_size);
	be_dws_cpu_to_le(req, wrb_payload_size);

	status = be_mcc_notify_wait(adapter);
	if (cmd_status)
		*cmd_status = (status & 0xffff);
	if (ext_status)
		*ext_status = 0;
	memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
	be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
EXPORT_SYMBOL(be_roce_mcc_cmd);
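
/* be_roce_mcc_cmd() is exported for the RoCE driver stacked on this NIC:
 * the caller hands in a raw MCC payload and the completion is copied back
 * into the same buffer.  A minimal sketch (hypothetical caller):
 *
 *	u16 cmd_status, ext_status;
 *
 *	be_roce_mcc_cmd(netdev, payload, payload_len,
 *			&cmd_status, &ext_status);
 */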