/*
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"

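/* Ring the MCC queue doorbell: the queue id goes in the low bits and a
 * num_posted count of 1 tells the controller that one new WRB is ready.
 */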
static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
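/* (A freshly DMAed entry has a non-zero little-endian flags word, while a
 * consumed entry has been zeroed by be_mcc_compl_use(); so a plain
 * "!= 0" test is safe in either byte order.)
 */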
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else {
		return false;
	}
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

static int be_mcc_compl_process(struct be_adapter *adapter,
	struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;
	if (compl_status == MCC_STATUS_SUCCESS) {
		if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
			struct be_cmd_resp_get_stats *resp =
						adapter->stats.cmd.va;
			be_dws_le_to_cpu(&resp->hw_stats,
						sizeof(resp->hw_stats));
			netdev_stats_update(adapter);
		}
	} else if (compl_status != MCC_STATUS_NOT_SUPPORTED) {
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
				CQE_STATUS_EXTD_MASK;
		dev_warn(&adapter->pdev->dev,
			"Error in cmd completion: status(compl/extd)=%d/%d\n",
			compl_status, extd_status);
	}
	return compl_status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
		struct be_async_event_link_state *evt)
{
	be_link_status_update(adapter,
		evt->port_link_status == ASYNC_EVENT_LINK_UP);
}

static inline bool is_link_state_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
				ASYNC_EVENT_CODE_LINK_STATE);
}

static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;

	spin_lock_bh(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			BUG_ON(!is_link_state_evt(compl->flags));

			/* Interpret compl as an async link evt */
			be_async_link_state_process(adapter,
				(struct be_async_event_link_state *) compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
				status = be_mcc_compl_process(adapter, compl);
				atomic_dec(&adapter->mcc_obj.q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, num);

	spin_unlock_bh(&adapter->mcc_cq_lock);
	return status;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout		120000 /* 12s timeout */
	int i, status;
	for (i = 0; i < mcc_timeout; i++) {
		status = be_process_mcc(adapter);
		if (status)
			return status;

		if (atomic_read(&adapter->mcc_obj.q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
		return -1;
	}
	return 0;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	be_mcc_notify(adapter);
	return be_mcc_wait_compl(adapter);
}
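
/* A hedged sketch of the calling pattern that the synchronous MCC
 * commands below follow (see be_cmd_pmac_add() for a concrete instance):
 *
 *	spin_lock_bh(&adapter->mcc_lock);
 *	wrb = wrb_from_mccq(adapter);
 *	req = embedded_payload(wrb);
 *	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 *	be_cmd_hdr_prepare(&req->hdr, subsystem, opcode, sizeof(*req));
 *	... fill in the request ...
 *	status = be_mcc_notify_wait(adapter);
 *	... if !status, read the response out of the same wrb ...
 *	spin_unlock_bh(&adapter->mcc_lock);
 */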

static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int cnt = 0, wait = 5;
	u32 ready;

	do {
		ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (cnt > 4000000) {
			dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
			return -1;
		}

		if (cnt > 50)
			wait = 200;
		cnt += wait;
		udelay(wait);
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
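/* Worked example with an illustrative (not real) address: for
 * mbox_mem->dma = 0x1_2345_6780, the first write posts only the hi flag
 * since dma bits 34 - 63 are zero here; the second write posts
 * ((u32)(0x123456780 >> 4)) << 2 = 0x48d159e0, i.e. dma bits 4 - 33 in
 * doorbell bits 2 - 31.
 */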
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
	u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);

	*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
	if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
		return -1;
	else
		return 0;
}

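/* Poll the POST (power-on self test) stage in the EP semaphore register
 * until the ARM firmware reports ready, sleeping 2s per retry for up to
 * ~20s before giving up.
 */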
int be_cmd_POST(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;

	do {
		status = be_POST_stage_get(adapter, &stage);
		if (status) {
			dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
				stage);
			return -1;
		} else if (stage != POST_STAGE_ARMFW_RDY) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(2 * HZ);
			timeout += 2;
		} else {
			return 0;
		}
	} while (timeout < 20);

	dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

/* Don't touch the hdr after it's prepared */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
				bool embedded, u8 sge_cnt)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	be_dws_cpu_to_le(wrb, 20);
}

/* Don't touch the hdr after it's prepared */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
}

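/* Example: an 8K ring starting on a 4K boundary spans two 4K pages, so
 * two {lo, hi} little-endian page addresses are filled in, 4K apart.
 */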
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
			struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

/* Converts interrupt delay in microseconds to multiplier value */
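/* Worked example: usec_delay = 8 gives interrupt_rate = 125000/sec, so
 * multiplier = (651042 - 125000) * 10 / 125000 = 42, which the final
 * rounding step reduces to (42 + 5) / 10 = 4.
 */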
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE			651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;
		/* Max delay, corresponding to the lowest interrupt rate */
		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			/* Round the multiplier to the closest value.*/
			multiplier = (multiplier + round/2) / round;
			multiplier = min(multiplier, (u32)1023);
		}
	}
	return multiplier;
}

static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	BUG_ON(atomic_read(&mccq->used) >= mccq->len);
	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
			be_pci_func(adapter));
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4 byte eqe */
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			u8 type, bool permanent, u32 if_handle)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16) if_handle);
		req->permanent = 0;
	}

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		u32 if_id, u32 *pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

	spin_unlock_bh(&adapter->mcc_lock);

	return status;
}

/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter,
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
	AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
	AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
			__ilog2_u32(cq->len/256));
	AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, func, ctxt, be_pci_func(adapter));
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);

	return status;
}

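/* For a power-of-2 ring, fls() returns log2(len) + 1: e.g. a 256-entry
 * queue encodes as 9, and the maximum 32K-entry ring (fls() == 16)
 * encodes as 0.
 */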
static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */
	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}

int be_cmd_mccq_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, be_pci_func(adapter));
	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	spin_unlock(&adapter->mbox_lock);

	return status;
}

int be_cmd_txq_create(struct be_adapter *adapter,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_tx_create *req;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
		sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
		be_encoded_q_len(txq->len));
	AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt,
			be_pci_func(adapter));
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);

	return status;
}

/* Uses mbox */
int be_cmd_rxq_create(struct be_adapter *adapter,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u16 max_frame_size, u32 if_id, u32 rss)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
		sizeof(*req));

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(max_frame_size);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);

	return status;
}

/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		BUG();
	}
	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);

	return status;
}

/* Create an rx filtering policy configuration on an i/f
 * Uses mbox
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_create *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	req->pmac_invalid = pmac_invalid;
	if (!pmac_invalid)
		memcpy(req->mac_addr, mac, ETH_ALEN);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
		if (!pmac_invalid)
			*pmac_id = le32_to_cpu(resp->pmac_id);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));

	req->interface_id = cpu_to_le32(interface_id);

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);

	return status;
}

/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_stats *req;
	struct be_sge *sge;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
	wrb->tag0 = OPCODE_ETH_GET_STATISTICS;

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, sizeof(*req));
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	be_mcc_notify(adapter);

	spin_unlock_bh(&adapter->mcc_lock);
	return 0;
}

/* Uses synchronous mcc */
int be_cmd_link_status_query(struct be_adapter *adapter,
			bool *link_up)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	req = embedded_payload(wrb);

	*link_up = false;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
		if (resp->mac_speed != PHY_LINK_SPEED_ZERO)
			*link_up = true;
	}

	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
		strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Set the EQ delay interval of an EQ to the specified value
 * Uses async mcc
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

	req->num_eq = cpu_to_le32(1);
	req->delay[0].eq_id = cpu_to_le32(eq_id);
	req->delay[0].phase = 0;
	req->delay[0].delay_multiplier = cpu_to_le32(eqd);

	be_mcc_notify(adapter);

	spin_unlock_bh(&adapter->mcc_lock);
	return 0;
}

/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
			u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mcc_notify_wait(adapter);

	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses MCC for this command as it may be called in BH context
 * Uses synchronous mcc
 */
int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_promiscuous_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_PROMISCUOUS, sizeof(*req));

	if (port_num)
		req->port1_promiscuous = en;
	else
		req->port0_promiscuous = en;

	status = be_mcc_notify_wait(adapter);

	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/*
 * Uses MCC for this command as it may be called in BH context
 * (mc_list == NULL) => multicast promiscuous
 */
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
		struct dev_mc_list *mc_list, u32 mc_count)
{
#define BE_MAX_MC		32 /* set mcast promisc if > 32 */
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcast_mac_config *req;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

	req->interface_id = if_id;
	if (mc_list && mc_count <= BE_MAX_MC) {
		int i;
		struct dev_mc_list *mc;

		req->num_mac = cpu_to_le16(mc_count);

		for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
			memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
	} else {
		req->promiscuous = 1;
	}

	be_mcc_notify_wait(adapter);

	spin_unlock_bh(&adapter->mcc_lock);

	return 0;
}

/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));

	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mcc_notify_wait(adapter);

	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses mbox */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
		*port_num = le32_to_cpu(resp->phys_port);
		*cap = le32_to_cpu(resp->function_cap);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_reset_function(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
			u8 bcn, u8 sts, u8 state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_enable_disable_beacon *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));

	req->port_num = port_num;
	req->beacon_state = state;
	req->beacon_duration = bcn;
	req->status_duration = sts;

	status = be_mcc_notify_wait(adapter);

	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_beacon_state *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));

	req->port_num = port_num;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_beacon_state *resp =
						embedded_payload(wrb);
		*state = resp->beacon_state;
	}

	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			u32 flash_type, u32 flash_opcode, u32 buf_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req = cmd->va;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, cmd->size, false, 1);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	req->params.op_type = cpu_to_le32(flash_type);
	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	status = be_mcc_notify_wait(adapter);

	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}