be_cmds.c 33.6 KB
Newer Older
S
Sathya Perla 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
/*
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
19
#include "be_cmds.h"
S
Sathya Perla 已提交
20

21
static void be_mcc_notify(struct be_adapter *adapter)
22
{
23
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
24 25 26 27
	u32 val = 0;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
28
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
29 30 31 32 33
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
34
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
35 36 37 38 39 40 41 42 43 44 45
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else {
		return false;
	}
}

/* Need to reset the entire word that houses the valid bit */
46
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
47 48 49 50
{
	compl->flags = 0;
}

51
static int be_mcc_compl_process(struct be_adapter *adapter,
52
	struct be_mcc_compl *compl)
53 54 55 56 57 58 59 60 61
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;
62 63 64 65 66 67 68 69 70
	if (compl_status == MCC_STATUS_SUCCESS) {
		if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
			struct be_cmd_resp_get_stats *resp =
						adapter->stats.cmd.va;
			be_dws_le_to_cpu(&resp->hw_stats,
						sizeof(resp->hw_stats));
			netdev_stats_update(adapter);
		}
	} else if (compl_status != MCC_STATUS_NOT_SUPPORTED) {
71 72
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
				CQE_STATUS_EXTD_MASK;
73 74
		dev_warn(&adapter->pdev->dev,
			"Error in cmd completion: status(compl/extd)=%d/%d\n",
75 76
			compl_status, extd_status);
	}
77
	return compl_status;
78 79
}

80
/* Link state evt is a string of bytes; no need for endian swapping */
81
static void be_async_link_state_process(struct be_adapter *adapter,
82 83
		struct be_async_event_link_state *evt)
{
84 85
	be_link_status_update(adapter,
		evt->port_link_status == ASYNC_EVENT_LINK_UP);
86 87 88 89 90 91 92 93
}

/* True when the async trailer carries the link-state event code. */
static inline bool is_link_state_evt(u32 trailer)
{
	u32 evt_code = (trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
			ASYNC_TRAILER_EVENT_CODE_MASK;

	return evt_code == ASYNC_EVENT_CODE_LINK_STATE;
}
94

95
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
96
{
97
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
98
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
99 100 101 102 103 104 105 106

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

107
/* Drain the MCC completion queue under mcc_cq_lock (BHs disabled):
 * dispatch async link-state events, process command completions, then
 * notify the CQ of the number of entries consumed.
 * Returns the status of the last processed command completion
 * (0 when nothing failed).
 */
int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;

	spin_lock_bh(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			BUG_ON(!is_link_state_evt(compl->flags));

			/* Interpret compl as a async link evt */
			be_async_link_state_process(adapter,
				(struct be_async_event_link_state *) compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
				status = be_mcc_compl_process(adapter, compl);
				/* one outstanding WRB is now done */
				atomic_dec(&adapter->mcc_obj.q.used);
		}
		/* clear the valid word so the entry is not re-processed */
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, num);

	spin_unlock_bh(&adapter->mcc_cq_lock);
	return status;
}

136
/* Wait till no more pending mcc requests are present */
137
static int be_mcc_wait_compl(struct be_adapter *adapter)
138
{
139 140
#define mcc_timeout		120000 /* 12s timeout */
	int i, status;
141
	for (i = 0; i < mcc_timeout; i++) {
142 143 144 145
		status = be_process_mcc(adapter);
		if (status)
			return status;

146
		if (atomic_read(&adapter->mcc_obj.q.used) == 0)
147 148 149
			break;
		udelay(100);
	}
150
	if (i == mcc_timeout) {
151
		dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
152 153 154
		return -1;
	}
	return 0;
155 156 157
}

/* Notify MCC requests and wait for completion.
 * Rings the MCCQ doorbell, then blocks until the queue drains.
 */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	be_mcc_notify(adapter);
	return be_mcc_wait_compl(adapter);
}

164
/* Poll the mailbox doorbell until its ready bit is set.
 * Starts with 5us delays, backs off to 200us steps after the first 50us,
 * and gives up (returning -1) after roughly 4 seconds.
 */
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int waited_us = 0, step_us = 5;

	for (;;) {
		if (ioread32(db) & MPU_MAILBOX_DB_RDY_MASK)
			return 0;

		if (waited_us > 4000000) {
			dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
			return -1;
		}

		if (waited_us > 50)
			step_us = 200;

		waited_us += step_us;
		udelay(step_us);
	}
}

/*
 * Insert the mailbox address into the doorbell in two steps
190
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
S
Sathya Perla 已提交
191
 */
192
static int be_mbox_notify_wait(struct be_adapter *adapter)
S
Sathya Perla 已提交
193 194 195
{
	int status;
	u32 val = 0;
196 197
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
S
Sathya Perla 已提交
198
	struct be_mcc_mailbox *mbox = mbox_mem->va;
199
	struct be_mcc_compl *compl = &mbox->compl;
S
Sathya Perla 已提交
200 201 202 203 204 205 206

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
207
	status = be_mbox_db_ready_wait(adapter, db);
S
Sathya Perla 已提交
208 209 210 211 212 213 214 215
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

216
	status = be_mbox_db_ready_wait(adapter, db);
S
Sathya Perla 已提交
217 218 219
	if (status != 0)
		return status;

220
	/* A cq entry has been made now */
221 222 223
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
224 225 226
		if (status)
			return status;
	} else {
227
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
S
Sathya Perla 已提交
228 229
		return -1;
	}
230
	return 0;
S
Sathya Perla 已提交
231 232
}

233
/* Read the POST stage from the MPU semaphore register into *stage.
 * Returns -1 when the register's error bit is set, 0 otherwise.
 */
static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
	u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);

	*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;

	if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
		return -1;

	return 0;
}

244
int be_cmd_POST(struct be_adapter *adapter)
S
Sathya Perla 已提交
245
{
246 247
	u16 stage;
	int status, timeout = 0;
S
Sathya Perla 已提交
248

249 250 251 252 253 254 255 256 257 258 259 260 261 262
	do {
		status = be_POST_stage_get(adapter, &stage);
		if (status) {
			dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
				stage);
			return -1;
		} else if (stage != POST_STAGE_ARMFW_RDY) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(2 * HZ);
			timeout += 2;
		} else {
			return 0;
		}
	} while (timeout < 20);
S
Sathya Perla 已提交
263

264 265
	dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
S
Sathya Perla 已提交
266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337
}

/* Payload area of an embedded (inline) WRB. */
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

/* First scatter-gather element of a non-embedded WRB. */
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

/* Fill in the WRB header (embedded flag or SGE count, payload length)
 * and convert it to little endian.  Don't touch the hdr after it's
 * prepared.
 */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
				bool embedded, u8 sge_cnt)
{
	u32 flags;

	if (embedded)
		flags = MCC_WRB_EMBEDDED_MASK;
	else
		flags = (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				MCC_WRB_SGE_CNT_SHIFT;

	wrb->embedded |= flags;
	wrb->payload_length = payload_len;
	be_dws_cpu_to_le(wrb, 20);
}

/* Fill in the common command request header.  request_length excludes
 * the header itself.  Don't touch the hdr after it's prepared.
 */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->subsystem = subsystem;
	req_hdr->opcode = opcode;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
}

/* Fill the pages[] array with the little-endian physical addresses of
 * the 4K pages spanned by mem, capped at max_pages entries.
 */
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
			struct be_dma_mem *mem)
{
	int idx, npages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 addr = (u64)mem->dma;

	for (idx = 0; idx < npages; idx++) {
		pages[idx].lo = cpu_to_le32(addr & 0xFFFFFFFF);
		pages[idx].hi = cpu_to_le32(upper_32_bits(addr));
		addr += PAGE_SIZE_4K;
	}
}

/* Converts interrupt delay in microseconds to multiplier value.
 * 0 means no delay; delays above 1s map to the max multiplier (1023),
 * which corresponds to the lowest interrupt rate.
 * NOTE(review): for very small non-zero delays the subtraction below can
 * wrap (interrupt_rate > MAX_INTR_RATE) — presumably callers pass sane
 * delays; verify against callers.
 */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE			651042
	const u32 round = 10;
	u32 interrupt_rate, multiplier;

	if (usec_delay == 0)
		return 0;

	interrupt_rate = 1000000 / usec_delay;
	/* Max delay, corresponding to the lowest interrupt rate */
	if (interrupt_rate == 0)
		return 1023;

	multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
	multiplier /= interrupt_rate;
	/* Round the multiplier to the closest value. */
	multiplier = (multiplier + round / 2) / round;
	return min(multiplier, (u32)1023);
}

338
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
S
Sathya Perla 已提交
339
{
340 341 342 343 344
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
S
Sathya Perla 已提交
345 346
}

347
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
348
{
349 350 351
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

352 353 354 355 356
	if (atomic_read(&mccq->used) >= mccq->len) {
		dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
		return NULL;
	}

357 358 359 360
	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
361 362 363
	return wrb;
}

364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414
/* Write the fw handshake magic across the mbox wrb hdr and issue it.
 * The init and clean handshakes share the same 8-byte layout; only the
 * four non-0xFF bytes (patt[]) differ.  Uses mbox under mbox_lock.
 */
static int be_cmd_fw_pattern(struct be_adapter *adapter, const u8 patt[4])
{
	u8 *wrb;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = patt[0];
	*wrb++ = patt[1];
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = patt[2];
	*wrb++ = patt[3];
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	static const u8 init_patt[4] = { 0x12, 0x34, 0x56, 0x78 };

	return be_cmd_fw_pattern(adapter, init_patt);
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	static const u8 clean_patt[4] = { 0xAA, 0xBB, 0xCC, 0xDD };

	return be_cmd_fw_pattern(adapter, clean_patt);
}
415
/* Create an event queue in hw for eq (4-byte EQEs), programming the
 * delay multiplier from eq_delay (usecs).  Uses mbox under mbox_lock.
 * On success, fills in eq->id and marks eq->created.
 */
int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
			be_pci_func(adapter));
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe*/
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

459
/* Query a MAC address from fw into mac_addr (ETH_ALEN bytes): either the
 * permanent (factory) address, or the address currently programmed on
 * interface if_handle.  Uses mbox under mbox_lock.
 */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			u8 type, bool permanent, u32 if_handle)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
		/* if_id only matters for a non-permanent query */
		req->if_id = cpu_to_le16((u16) if_handle);
		req->permanent = 0;
	}

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

495
/* Program mac_addr on interface if_id; on success *pmac_id receives the
 * handle needed to delete it later.  Uses synchronous MCCQ under
 * mcc_lock; returns -EBUSY when no WRB is available.
 */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		u32 if_id, u32 *pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

531
/* Remove the MAC identified by pmac_id (from be_cmd_pmac_add) from
 * interface if_id.  Uses synchronous MCCQ under mcc_lock; returns
 * -EBUSY when no WRB is available.
 */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

562
/* Create a completion queue bound to eq, armed and eventable.
 * Uses mbox under mbox_lock.  On success fills in cq->id and marks
 * cq->created.
 */
int be_cmd_cq_create(struct be_adapter *adapter,
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
	AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
	AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
			__ilog2_u32(cq->len/256));
	AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, func, ctxt, be_pci_func(adapter));
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);

	return status;
}

/* Encode a queue length for hw ring-size fields: log2(len) + 1,
 * with the special case that 16 (i.e. len 32k+) encodes as 0.
 */
static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */

	return (len_encoded == 16) ? 0 : len_encoded;
}

620
/* Create the MCC queue, bound to completion queue cq.  Uses mbox under
 * mbox_lock.  On success fills in mccq->id and marks mccq->created.
 */
int be_cmd_mccq_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, be_pci_func(adapter));
	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	spin_unlock(&adapter->mbox_lock);

	return status;
}

664
/* Create an ethernet tx queue (standard type, ULP1) whose send
 * completions go to cq.  Uses mbox under mbox_lock.  On success fills in
 * txq->id and marks txq->created.
 */
int be_cmd_txq_create(struct be_adapter *adapter,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_tx_create *req;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
		sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
		be_encoded_q_len(txq->len));
	AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt,
			be_pci_func(adapter));
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);

	return status;
}

712
/* Create an ethernet rx queue on interface if_id, completing to cq_id,
 * with the given fragment and max frame sizes; rss selects the RSS ring
 * behavior.  Uses mbox under mbox_lock.  On success fills in rxq->id and
 * marks rxq->created.
 */
int be_cmd_rxq_create(struct be_adapter *adapter,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u16 max_frame_size, u32 if_id, u32 rss)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
		sizeof(*req));

	req->cq_id = cpu_to_le16(cq_id);
	/* hw takes log2 of the fragment size */
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(max_frame_size);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);

	return status;
}

752 753 754
/* Generic destroyer function for all types of queues
 * (QTYPE_EQ/CQ/TXQ/RXQ/MCCQ); BUGs on any other queue_type.
 * Uses Mbox under mbox_lock.
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	/* pick the subsystem/opcode pair matching the queue type */
	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		BUG();
	}
	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);

	return status;
}

804 805 806
/* Create an rx filtering policy configuration on an i/f.
 * On success *if_handle receives the interface id and, unless
 * pmac_invalid, mac is programmed and *pmac_id receives its handle.
 * Uses mbox under mbox_lock.
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_create *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	req->pmac_invalid = pmac_invalid;
	if (!pmac_invalid)
		memcpy(req->mac_addr, mac, ETH_ALEN);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
		if (!pmac_invalid)
			*pmac_id = le32_to_cpu(resp->pmac_id);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

842
/* Destroy the interface identified by interface_id (from
 * be_cmd_if_create).  Uses mbox under mbox_lock.
 */
int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));

	req->interface_id = cpu_to_le32(interface_id);

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);

	return status;
}

/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block (nonemb_cmd).
 * Uses asynchronous MCC: the WRB is posted and this returns immediately;
 * the response is consumed later by be_mcc_compl_process(), which matches
 * it via the OPCODE_ETH_GET_STATISTICS tag set on wrb->tag0 below.
 * Returns 0 after posting, or -EBUSY when no WRB is available.
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_stats *req;
	struct be_sge *sge;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
	wrb->tag0 = OPCODE_ETH_GET_STATISTICS;

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, sizeof(*req));
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

905
/* Query the current link state.  *link_up is set true (and *mac_speed /
 * *link_speed filled in) only when fw reports a non-zero mac speed;
 * otherwise *link_up is left false.  Uses synchronous mcc under
 * mcc_lock; returns -EBUSY when no WRB is available.
 */
int be_cmd_link_status_query(struct be_adapter *adapter,
			bool *link_up, u8 *mac_speed, u16 *link_speed)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	*link_up = false;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
		if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
			*link_up = true;
			*link_speed = le16_to_cpu(resp->link_speed);
			*mac_speed = resp->mac_speed;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

944
/* Read the firmware version string into fw_ver (caller-provided buffer
 * of at least FW_VER_LEN bytes).  Uses Mbox under mbox_lock.
 * NOTE(review): strncpy does not guarantee NUL-termination when the fw
 * string fills all FW_VER_LEN bytes — presumably fw always terminates
 * it; verify against consumers of fw_ver.
 */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
		strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

971 972 973
/* set the EQ delay interval of an EQ to specified value
 * Uses async mcc: the WRB is posted and this returns without waiting
 * for the completion.  Returns 0 after posting, or -EBUSY when no WRB
 * is available.
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

	/* single EQ update per command */
	req->num_eq = cpu_to_le32(1);
	req->delay[0].eq_id = cpu_to_le32(eq_id);
	req->delay[0].phase = 0;
	req->delay[0].delay_multiplier = cpu_to_le32(eqd);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

1006
/* Configure vlan filtering on interface if_id: program the num entries
 * of vtag_array (ignored in promiscuous mode) plus the untagged /
 * vlan-promiscuous flags.  Uses synchronous mcc under mcc_lock;
 * returns -EBUSY when no WRB is available.
 */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
			u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

1044 1045 1046
/* Enable/disable promiscuous mode on port 0 or 1.
 * Uses MCC for this command as it may be called in BH context;
 * synchronous mcc under mcc_lock.  Returns -EBUSY when no WRB is
 * available.
 */
int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_promiscuous_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_PROMISCUOUS, sizeof(*req));

	/* the request carries one flag per port */
	if (port_num)
		req->port1_promiscuous = en;
	else
		req->port0_promiscuous = en;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

1079
/*
1080
 * Uses MCC for this command as it may be called in BH context
1081 1082
 * (mc == NULL) => multicast promiscous
 */
1083
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
1084 1085
		struct dev_mc_list *mc_list, u32 mc_count,
		struct be_dma_mem *mem)
S
Sathya Perla 已提交
1086
{
1087
	struct be_mcc_wrb *wrb;
1088 1089 1090
	struct be_cmd_req_mcast_mac_config *req = mem->va;
	struct be_sge *sge;
	int status;
S
Sathya Perla 已提交
1091

1092
	spin_lock_bh(&adapter->mcc_lock);
1093

1094
	wrb = wrb_from_mccq(adapter);
1095 1096 1097 1098
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
1099 1100
	sge = nonembedded_sgl(wrb);
	memset(req, 0, sizeof(*req));
S
Sathya Perla 已提交
1101

1102 1103 1104 1105
	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
	sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
	sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(mem->size);
S
Sathya Perla 已提交
1106 1107 1108 1109 1110

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

	req->interface_id = if_id;
1111
	if (mc_list) {
1112 1113 1114 1115 1116 1117 1118 1119 1120
		int i;
		struct dev_mc_list *mc;

		req->num_mac = cpu_to_le16(mc_count);

		for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
			memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
	} else {
		req->promiscuous = 1;
S
Sathya Perla 已提交
1121 1122
	}

1123
	status = be_mcc_notify_wait(adapter);
S
Sathya Perla 已提交
1124

1125
err:
1126
	spin_unlock_bh(&adapter->mcc_lock);
1127
	return status;
S
Sathya Perla 已提交
1128 1129
}

1130
/* Uses synchrounous mcc */
1131
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
S
Sathya Perla 已提交
1132
{
1133 1134
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;
S
Sathya Perla 已提交
1135 1136
	int status;

1137
	spin_lock_bh(&adapter->mcc_lock);
S
Sathya Perla 已提交
1138

1139
	wrb = wrb_from_mccq(adapter);
1140 1141 1142 1143
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
1144
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
1145 1146 1147 1148 1149 1150 1151 1152 1153

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));

	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

1154
	status = be_mcc_notify_wait(adapter);
S
Sathya Perla 已提交
1155

1156
err:
1157
	spin_unlock_bh(&adapter->mcc_lock);
S
Sathya Perla 已提交
1158 1159 1160
	return status;
}

1161
/* Uses sycn mcc */
1162
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
S
Sathya Perla 已提交
1163
{
1164 1165
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
S
Sathya Perla 已提交
1166 1167
	int status;

1168
	spin_lock_bh(&adapter->mcc_lock);
S
Sathya Perla 已提交
1169

1170
	wrb = wrb_from_mccq(adapter);
1171 1172 1173 1174
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
1175
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
1176 1177 1178 1179 1180 1181

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

1182
	status = be_mcc_notify_wait(adapter);
S
Sathya Perla 已提交
1183 1184 1185 1186 1187 1188 1189
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

1190
err:
1191
	spin_unlock_bh(&adapter->mcc_lock);
S
Sathya Perla 已提交
1192 1193 1194
	return status;
}

1195
/* Uses mbox */
1196
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
S
Sathya Perla 已提交
1197
{
1198 1199
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
S
Sathya Perla 已提交
1200 1201
	int status;

1202
	spin_lock(&adapter->mbox_lock);
S
Sathya Perla 已提交
1203

1204 1205
	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
1206 1207 1208 1209 1210 1211

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

1212
	status = be_mbox_notify_wait(adapter);
S
Sathya Perla 已提交
1213 1214 1215
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
		*port_num = le32_to_cpu(resp->phys_port);
1216
		*cap = le32_to_cpu(resp->function_cap);
S
Sathya Perla 已提交
1217 1218
	}

1219
	spin_unlock(&adapter->mbox_lock);
S
Sathya Perla 已提交
1220 1221
	return status;
}
1222

1223
/* Uses mbox */
1224 1225
int be_cmd_reset_function(struct be_adapter *adapter)
{
1226 1227
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;
1228 1229 1230 1231
	int status;

	spin_lock(&adapter->mbox_lock);

1232 1233
	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
1234 1235 1236 1237 1238 1239

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));

1240
	status = be_mbox_notify_wait(adapter);
1241 1242 1243 1244

	spin_unlock(&adapter->mbox_lock);
	return status;
}
1245

1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256
/* Uses sync mcc */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
			u8 bcn, u8 sts, u8 state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_enable_disable_beacon *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
1257 1258 1259 1260
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));

	req->port_num = port_num;
	req->beacon_state = state;
	req->beacon_duration = bcn;
	req->status_duration = sts;

	status = be_mcc_notify_wait(adapter);

1275
err:
1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Read back the current beacon LED state of a port.
 * Synchronous MCC command.
 */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
	struct be_cmd_req_get_beacon_state *req;
	struct be_mcc_wrb *wrb;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (wrb == NULL) {
		status = -EBUSY;
		goto unlock;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));

	req->port_num = port_num;

	status = be_mcc_notify_wait(adapter);
	if (status == 0) {
		/* The response overwrites the embedded request payload */
		struct be_cmd_resp_get_beacon_state *resp =
						embedded_payload(wrb);

		*state = resp->beacon_state;
	}

unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325
/* Uses sync mcc */
int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
				u8 *connector)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_port_type *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
1326 1327 1328 1329
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));

	req->port = cpu_to_le32(port);
	req->page_num = cpu_to_le32(TR_PAGE_A0);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
			*connector = resp->data.connector;
	}

1345
err:
1346 1347 1348 1349
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

1350 1351 1352
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			u32 flash_type, u32 flash_opcode, u32 buf_size)
{
1353
	struct be_mcc_wrb *wrb;
1354
	struct be_cmd_write_flashrom *req = cmd->va;
1355
	struct be_sge *sge;
1356 1357
	int status;

1358 1359 1360
	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
1361 1362 1363 1364 1365
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = cmd->va;
1366 1367
	sge = nonembedded_sgl(wrb);

1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379
	be_wrb_hdr_prepare(wrb, cmd->size, false, 1);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	req->params.op_type = cpu_to_le32(flash_type);
	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

1380
	status = be_mcc_notify_wait(adapter);
1381

1382
err:
1383
	spin_unlock_bh(&adapter->mcc_lock);
1384 1385
	return status;
}
1386 1387 1388 1389 1390 1391 1392 1393 1394 1395

int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
1396 1397 1398 1399
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);

	req->params.op_type = cpu_to_le32(FLASHROM_TYPE_REDBOOT);
	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
	req->params.offset = 0x3FFFC;
	req->params.data_buf_size = 0x4;

	status = be_mcc_notify_wait(adapter);
	if (!status)
		memcpy(flashed_crc, req->params.data_buf, 4);

1416
err:
1417 1418 1419
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}