/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"

21
/* Ring the MCC queue doorbell to tell hw that one new WRB has been posted */
static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	/* doorbell value encodes the ring id and the count of newly
	 * posted entries (always 1 here) */
	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	if (compl->flags != 0) {
		/* New entry: convert the flags word to host endian exactly
		 * once, then sanity-check that the valid bit really is set */
		compl->flags = le32_to_cpu(compl->flags);
		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else {
		return false;
	}
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	/* marks this CQ entry consumed so be_mcc_compl_is_new() won't
	 * report it again */
	compl->flags = 0;
}

51
/* Process one MCC command completion.
 * Swaps the status words to host endian, refreshes the cached hw stats on a
 * GET_STATISTICS completion, and logs any command failure (except
 * NOT_SUPPORTED, which is silently ignored).
 * Returns the completion status; 0 means MCC_STATUS_SUCCESS.
 */
static int be_mcc_compl_process(struct be_adapter *adapter,
	struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;
	if (compl_status == MCC_STATUS_SUCCESS) {
		if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
			/* stats response lands in the pre-allocated dma
			 * buffer; swap it and push into netdev stats */
			struct be_cmd_resp_get_stats *resp =
						adapter->stats.cmd.va;
			be_dws_le_to_cpu(&resp->hw_stats,
						sizeof(resp->hw_stats));
			netdev_stats_update(adapter);
		}
	} else if (compl_status != MCC_STATUS_NOT_SUPPORTED) {
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
				CQE_STATUS_EXTD_MASK;
		dev_warn(&adapter->pdev->dev,
		"Error in cmd completion - opcode %d, compl %d, extd %d\n",
			compl->tag0, compl_status, extd_status);
	}
	return compl_status;
}

80
/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
		struct be_async_event_link_state *evt)
{
	be_link_status_update(adapter,
		evt->port_link_status == ASYNC_EVENT_LINK_UP);
}

/* Returns true when the async-event trailer carries a link-state
 * event code */
static inline bool is_link_state_evt(u32 trailer)
{
	u32 evt_code = (trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
			ASYNC_TRAILER_EVENT_CODE_MASK;

	return evt_code == ASYNC_EVENT_CODE_LINK_STATE;
}
94

95
/* Returns the next new (unconsumed) completion in the MCC CQ and advances
 * the tail pointer; returns NULL when nothing new is present.
 * Callers hold mcc_cq_lock (see be_process_mcc()).
 */
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

107 108 109 110 111 112 113 114 115 116 117 118 119 120 121
/* Arm the MCC CQ and allow subsequent completion processing to keep
 * re-arming it */
void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

/* Stop re-arming the MCC CQ after the next drain (see be_process_mcc()) */
void be_async_mcc_disable(struct be_adapter *adapter)
{
	adapter->mcc_obj.rearm_cq = false;
}

122
/* Drain the MCC completion queue: dispatch async link-state events,
 * process command completions, and re-arm/ack the CQ if anything was reaped.
 * Returns the status of the last command completion processed (0 if none).
 */
int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock_bh(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			BUG_ON(!is_link_state_evt(compl->flags));

			/* Interpret compl as a async link evt */
			be_async_link_state_process(adapter,
				(struct be_async_event_link_state *) compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
				status = be_mcc_compl_process(adapter, compl);
				/* one fewer WRB outstanding on the MCCQ */
				atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	/* ack the reaped entries; rearm_cq is false once async processing
	 * has been disabled */
	if (num)
		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

	spin_unlock_bh(&adapter->mcc_cq_lock);
	return status;
}

152
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout		120000 /* 12s timeout */
	int i, status;
	for (i = 0; i < mcc_timeout; i++) {
		status = be_process_mcc(adapter);
		if (status)
			return status;

		/* all posted WRBs have completed */
		if (atomic_read(&adapter->mcc_obj.q.used) == 0)
			break;
		udelay(100);	/* 100us per iteration, 120000 iters = 12s */
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
		return -1;
	}
	return 0;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	be_mcc_notify(adapter);
	return be_mcc_wait_compl(adapter);
}

180
/* Poll the mailbox doorbell until its ready bit is set.
 * Returns 0 when ready, -1 on timeout or when the device is gone
 * (register reads back all-ones).
 */
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int cnt = 0, wait = 5;
	u32 ready;

	do {
		ready = ioread32(db);
		/* all-ones read indicates the PCI device was removed */
		if (ready == 0xffffffff) {
			dev_err(&adapter->pdev->dev,
				"pci slot disconnected\n");
			return -1;
		}

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (cnt > 4000000) {
			dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
			return -1;
		}

		/* back off: 5us delays for the first ~50us, then 200us */
		if (cnt > 50)
			wait = 200;
		cnt += wait;
		udelay(wait);
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* step 1: HI bit set, write the high half of the mbox dma addr */
	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* step 2: HI bit clear, write the low half of the mbox dma addr */
	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

261
/* Read the current POST stage from the MPU semaphore register.
 * *stage is always set; returns -1 if the hw reports a POST error,
 * 0 otherwise.
 */
static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
	u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);

	*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
	if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
		return -1;
	else
		return 0;
}

272
/* Wait (sleeping, up to ~40s) for firmware POST to reach the ARMFW_RDY
 * stage.  Returns 0 on success, -1 on POST error or timeout.
 */
int be_cmd_POST(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;

	do {
		status = be_POST_stage_get(adapter, &stage);
		if (status) {
			dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
				stage);
			return -1;
		} else if (stage != POST_STAGE_ARMFW_RDY) {
			/* not ready yet: sleep 2s and re-poll */
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(2 * HZ);
			timeout += 2;
		} else {
			return 0;
		}
	} while (timeout < 20);

	dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}

/* Returns the inline payload area of an embedded-command WRB */
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

/* Returns the first scatter-gather element of a non-embedded WRB */
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

/* Don't touch the hdr after it's prepared */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
				bool embedded, u8 sge_cnt, u32 opcode)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	/* tag0 lets the completion handler identify the command's opcode */
	wrb->tag0 = opcode;
	be_dws_cpu_to_le(wrb, 8);
}

/* Don't touch the hdr after it's prepared */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	/* request_length excludes the header itself */
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
}

/* Fill in a command's page-address array (lo/hi dwords, little endian)
 * for the 4K pages spanned by mem, capped at max_pages entries.
 */
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
			struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

/* Converts an interrupt delay in microseconds to the hw's delay-multiplier
 * encoding (0..1023); 0 usec means no delay, larger delays map to larger
 * multipliers.
 */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE			651042
	const u32 round = 10;
	u32 interrupt_rate, multiplier;

	if (usec_delay == 0)
		return 0;

	interrupt_rate = 1000000 / usec_delay;
	/* Max delay, corresponding to the lowest interrupt rate */
	if (interrupt_rate == 0)
		return 1023;

	multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
	multiplier /= interrupt_rate;
	/* Round the multiplier to the closest value.*/
	multiplier = (multiplier + round/2) / round;
	return min(multiplier, (u32)1023);
}

368
/* Returns the (zeroed) mailbox WRB; the mbox path is used by commands that
 * run before/without the MCC queue.  Callers hold mbox_lock.
 */
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

377
/* Allocates (and zeroes) the next free WRB slot in the MCC queue; returns
 * NULL when the queue is full.  Callers hold mcc_lock.
 */
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (atomic_read(&mccq->used) >= mccq->len) {
		dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
		return NULL;
	}

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427
/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	spin_lock(&adapter->mbox_lock);

	/* fw handshake pattern: FF 12 34 FF FF 56 78 FF */
	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	/* don't touch the hw after an EEH (pci) error has been detected */
	if (adapter->eeh_err)
		return -EIO;

	spin_lock(&adapter->mbox_lock);

	/* fw teardown pattern: FF AA BB FF FF CC DD FF */
	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);
	return status;
}
448
/* Create an event queue; uses mbox.
 * On success fills in eq->id and marks the queue created.
 */
int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
			be_pci_func(adapter));
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe*/
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

492
/* Query a mac address (permanent, or the one currently programmed on
 * if_handle); copies the result into mac_addr.  Uses mbox.
 */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			u8 type, bool permanent, u32 if_handle)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_MAC_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
		/* query the mac currently programmed on the interface */
		req->if_id = cpu_to_le16((u16) if_handle);
		req->permanent = 0;
	}

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

529
/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		u32 if_id, u32 *pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		/* MCCQ full */
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_ADD);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		/* on success return the newly assigned pmac handle */
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

566
/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		/* MCCQ full */
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_DEL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

598
/* Create a completion queue bound to eq; uses Mbox.
 * On success fills in cq->id and marks the queue created.
 */
int be_cmd_cq_create(struct be_adapter *adapter,
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_CQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
	AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
	AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
			__ilog2_u32(cq->len/256));
	AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, func, ctxt, be_pci_func(adapter));
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);

	return status;
}

/* Encode a queue length for the hw ring-size field: fls(len) == log2+1,
 * with the maximum encoding (16) represented as 0.
 */
static u32 be_encoded_q_len(int q_len)
{
	u32 encoded = fls(q_len); /* log2(len) + 1 */

	return (encoded == 16) ? 0 : encoded;
}

657
/* Create the MCC queue bound to cq; uses mbox.
 * On success fills in mccq->id and marks the queue created.
 */
int be_cmd_mccq_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MCC_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, be_pci_func(adapter));
	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	spin_unlock(&adapter->mbox_lock);

	return status;
}

702
/* Create an ethernet tx queue bound to cq; uses mbox.
 * On success fills in txq->id and marks the queue created.
 */
int be_cmd_txq_create(struct be_adapter *adapter,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_tx_create *req;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_TX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
		sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
		be_encoded_q_len(txq->len));
	AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt,
			be_pci_func(adapter));
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);

	return status;
}

751
/* Create an ethernet rx queue; uses mbox.
 * On success fills in rxq->id and marks the queue created.
 */
int be_cmd_rxq_create(struct be_adapter *adapter,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u16 max_frame_size, u32 if_id, u32 rss)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_RX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
		sizeof(*req));

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;	/* hw wants log2(frag_size) */
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(max_frame_size);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);

	return status;
}

792 793 794
/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	/* don't touch the hw after an EEH (pci) error has been detected */
	if (adapter->eeh_err)
		return -EIO;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	/* map the queue type to its destroy opcode/subsystem */
	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		/* NOTE(review): BUG() here fires while mbox_lock is held */
		BUG();
	}

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);

	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);

	return status;
}

848 849 850
/* Create an rx filtering policy configuration on an i/f
 * Uses mbox
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_create *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_INTERFACE_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	/* only program a mac when the caller supplied a valid one */
	req->pmac_invalid = pmac_invalid;
	if (!pmac_invalid)
		memcpy(req->mac_addr, mac, ETH_ALEN);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
		if (!pmac_invalid)
			*pmac_id = le32_to_cpu(resp->pmac_id);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

887
/* Uses mbox */
int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
	int status;

	/* don't touch the hw after an EEH (pci) error has been detected */
	if (adapter->eeh_err)
		return -EIO;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_INTERFACE_DESTROY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));

	req->interface_id = cpu_to_le32(interface_id);

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);

	return status;
}

/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_stats *req;
	struct be_sge *sge;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		/* MCCQ full */
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_ETH_GET_STATISTICS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, sizeof(*req));
	/* point the WRB's single sge at the caller's dma buffer */
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	/* async: the completion is consumed in be_mcc_compl_process() */
	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

954
/* Uses synchronous mcc */
int be_cmd_link_status_query(struct be_adapter *adapter,
			bool *link_up, u8 *mac_speed, u16 *link_speed)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		/* MCCQ full */
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	/* default to link-down; overwritten below on a live link */
	*link_up = false;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
		/* non-zero mac speed implies the link is up;
		 * mac_speed/link_speed stay untouched otherwise */
		if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
			*link_up = true;
			*link_speed = le16_to_cpu(resp->link_speed);
			*mac_speed = resp->mac_speed;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

994
/* Uses Mbox */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_FW_VERSION);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
		/* NOTE(review): strncpy may leave fw_ver unterminated if the
		 * fw string is exactly FW_VER_LEN bytes — confirm callers
		 * size/terminate the buffer accordingly */
		strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

1022 1023 1024
/* set the EQ delay interval of an EQ to specified value
 * Uses async mcc
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		/* MCCQ full */
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MODIFY_EQ_DELAY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

	/* single-entry delay update for eq_id */
	req->num_eq = cpu_to_le32(1);
	req->delay[0].eq_id = cpu_to_le32(eq_id);
	req->delay[0].phase = 0;
	req->delay[0].delay_multiplier = cpu_to_le32(eqd);

	/* async: fire and forget, completion reaped later */
	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

1058
/* Uses sycnhronous mcc */
1059
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
S
Sathya Perla 已提交
1060 1061
			u32 num, bool untagged, bool promiscuous)
{
1062 1063
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
S
Sathya Perla 已提交
1064 1065
	int status;

1066 1067 1068
	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
1069 1070 1071 1072
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
1073
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
1074

1075 1076
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_VLAN_CONFIG);
S
Sathya Perla 已提交
1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

1090
	status = be_mcc_notify_wait(adapter);
S
Sathya Perla 已提交
1091

1092
err:
1093
	spin_unlock_bh(&adapter->mcc_lock);
S
Sathya Perla 已提交
1094 1095 1096
	return status;
}

1097 1098 1099
/* Uses MCC for this command as it may be called in BH context
 * Uses synchronous mcc
 */
1100
int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
S
Sathya Perla 已提交
1101
{
1102 1103
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_promiscuous_config *req;
1104
	int status;
S
Sathya Perla 已提交
1105

1106
	spin_lock_bh(&adapter->mcc_lock);
1107

1108
	wrb = wrb_from_mccq(adapter);
1109 1110 1111 1112
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
1113
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
1114

1115
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_PROMISCUOUS);
S
Sathya Perla 已提交
1116 1117 1118 1119 1120 1121 1122 1123 1124

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_PROMISCUOUS, sizeof(*req));

	if (port_num)
		req->port1_promiscuous = en;
	else
		req->port0_promiscuous = en;

1125
	status = be_mcc_notify_wait(adapter);
S
Sathya Perla 已提交
1126

1127
err:
1128
	spin_unlock_bh(&adapter->mcc_lock);
1129
	return status;
S
Sathya Perla 已提交
1130 1131
}

1132
/*
1133
 * Uses MCC for this command as it may be called in BH context
1134 1135
 * (mc == NULL) => multicast promiscous
 */
1136
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
1137 1138
		struct dev_mc_list *mc_list, u32 mc_count,
		struct be_dma_mem *mem)
S
Sathya Perla 已提交
1139
{
1140
	struct be_mcc_wrb *wrb;
1141 1142 1143
	struct be_cmd_req_mcast_mac_config *req = mem->va;
	struct be_sge *sge;
	int status;
S
Sathya Perla 已提交
1144

1145
	spin_lock_bh(&adapter->mcc_lock);
1146

1147
	wrb = wrb_from_mccq(adapter);
1148 1149 1150 1151
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
1152 1153
	sge = nonembedded_sgl(wrb);
	memset(req, 0, sizeof(*req));
S
Sathya Perla 已提交
1154

1155 1156
	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_NTWK_MULTICAST_SET);
1157 1158 1159
	sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
	sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(mem->size);
S
Sathya Perla 已提交
1160 1161 1162 1163 1164

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

	req->interface_id = if_id;
1165
	if (mc_list) {
1166 1167 1168 1169 1170 1171 1172 1173 1174
		int i;
		struct dev_mc_list *mc;

		req->num_mac = cpu_to_le16(mc_count);

		for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
			memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
	} else {
		req->promiscuous = 1;
S
Sathya Perla 已提交
1175 1176
	}

1177
	status = be_mcc_notify_wait(adapter);
S
Sathya Perla 已提交
1178

1179
err:
1180
	spin_unlock_bh(&adapter->mcc_lock);
1181
	return status;
S
Sathya Perla 已提交
1182 1183
}

1184
/* Uses synchrounous mcc */
1185
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
S
Sathya Perla 已提交
1186
{
1187 1188
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;
S
Sathya Perla 已提交
1189 1190
	int status;

1191
	spin_lock_bh(&adapter->mcc_lock);
S
Sathya Perla 已提交
1192

1193
	wrb = wrb_from_mccq(adapter);
1194 1195 1196 1197
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
1198
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
1199

1200 1201
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_SET_FLOW_CONTROL);
S
Sathya Perla 已提交
1202 1203 1204 1205 1206 1207 1208

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));

	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

1209
	status = be_mcc_notify_wait(adapter);
S
Sathya Perla 已提交
1210

1211
err:
1212
	spin_unlock_bh(&adapter->mcc_lock);
S
Sathya Perla 已提交
1213 1214 1215
	return status;
}

1216
/* Uses sycn mcc */
1217
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
S
Sathya Perla 已提交
1218
{
1219 1220
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
S
Sathya Perla 已提交
1221 1222
	int status;

1223
	spin_lock_bh(&adapter->mcc_lock);
S
Sathya Perla 已提交
1224

1225
	wrb = wrb_from_mccq(adapter);
1226 1227 1228 1229
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
1230
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
1231

1232 1233
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_FLOW_CONTROL);
S
Sathya Perla 已提交
1234 1235 1236 1237

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

1238
	status = be_mcc_notify_wait(adapter);
S
Sathya Perla 已提交
1239 1240 1241 1242 1243 1244 1245
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

1246
err:
1247
	spin_unlock_bh(&adapter->mcc_lock);
S
Sathya Perla 已提交
1248 1249 1250
	return status;
}

1251
/* Uses mbox */
1252
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
S
Sathya Perla 已提交
1253
{
1254 1255
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
S
Sathya Perla 已提交
1256 1257
	int status;

1258
	spin_lock(&adapter->mbox_lock);
S
Sathya Perla 已提交
1259

1260 1261
	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
1262

1263 1264
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);
S
Sathya Perla 已提交
1265 1266 1267 1268

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

1269
	status = be_mbox_notify_wait(adapter);
S
Sathya Perla 已提交
1270 1271 1272
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
		*port_num = le32_to_cpu(resp->phys_port);
1273
		*cap = le32_to_cpu(resp->function_cap);
S
Sathya Perla 已提交
1274 1275
	}

1276
	spin_unlock(&adapter->mbox_lock);
S
Sathya Perla 已提交
1277 1278
	return status;
}
1279

1280
/* Uses mbox */
1281 1282
int be_cmd_reset_function(struct be_adapter *adapter)
{
1283 1284
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;
1285 1286 1287 1288
	int status;

	spin_lock(&adapter->mbox_lock);

1289 1290
	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
1291

1292 1293
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_FUNCTION_RESET);
1294 1295 1296 1297

	be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));

1298
	status = be_mbox_notify_wait(adapter);
1299 1300 1301 1302

	spin_unlock(&adapter->mbox_lock);
	return status;
}
1303

1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314
/* Set the beacon LED state of a port. Uses sync mcc. */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
			u8 bcn, u8 sts, u8 state)
{
	struct be_cmd_req_enable_disable_beacon *bcn_req;
	struct be_mcc_wrb *mcc_wrb;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	mcc_wrb = wrb_from_mccq(adapter);
	if (mcc_wrb == NULL) {
		status = -EBUSY;
		goto err;
	}
	bcn_req = embedded_payload(mcc_wrb);

	be_wrb_hdr_prepare(mcc_wrb, sizeof(*bcn_req), true, 0,
			OPCODE_COMMON_ENABLE_DISABLE_BEACON);

	be_cmd_hdr_prepare(&bcn_req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*bcn_req));

	bcn_req->port_num = port_num;
	bcn_req->beacon_state = state;
	bcn_req->beacon_duration = bcn;
	bcn_req->status_duration = sts;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Query the current beacon LED state of a port. Uses sync mcc. */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
	struct be_cmd_req_get_beacon_state *bcn_req;
	struct be_mcc_wrb *mcc_wrb;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	mcc_wrb = wrb_from_mccq(adapter);
	if (mcc_wrb == NULL) {
		status = -EBUSY;
		goto err;
	}
	bcn_req = embedded_payload(mcc_wrb);

	be_wrb_hdr_prepare(mcc_wrb, sizeof(*bcn_req), true, 0,
			OPCODE_COMMON_GET_BEACON_STATE);

	be_cmd_hdr_prepare(&bcn_req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_BEACON_STATE, sizeof(*bcn_req));

	bcn_req->port_num = port_num;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_beacon_state *resp =
						embedded_payload(mcc_wrb);

		*state = resp->beacon_state;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385
/* Read the transceiver connector type of a port. Uses sync mcc. */
int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
				u8 *connector)
{
	struct be_cmd_req_port_type *pt_req;
	struct be_mcc_wrb *mcc_wrb;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	mcc_wrb = wrb_from_mccq(adapter);
	if (mcc_wrb == NULL) {
		status = -EBUSY;
		goto err;
	}
	pt_req = embedded_payload(mcc_wrb);

	/* NOTE: the wrb payload length is sized for the response struct,
	 * while the cmd hdr carries the request size, as in the original.
	 */
	be_wrb_hdr_prepare(mcc_wrb, sizeof(struct be_cmd_resp_port_type),
			true, 0, OPCODE_COMMON_READ_TRANSRECV_DATA);

	be_cmd_hdr_prepare(&pt_req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*pt_req));

	pt_req->port = cpu_to_le32(port);
	pt_req->page_num = cpu_to_le32(TR_PAGE_A0);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_port_type *resp = embedded_payload(mcc_wrb);

		*connector = resp->data.connector;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

1411 1412 1413
/* Write a buffer to the adapter flashrom. Uses synchronous mcc with a
 * non-embedded (DMA) command buffer supplied by the caller.
 */
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			u32 flash_type, u32 flash_opcode, u32 buf_size)
{
	struct be_cmd_write_flashrom *fl_req;
	struct be_mcc_wrb *mcc_wrb;
	struct be_sge *sgl;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	mcc_wrb = wrb_from_mccq(adapter);
	if (mcc_wrb == NULL) {
		status = -EBUSY;
		goto err;
	}
	fl_req = cmd->va;
	sgl = nonembedded_sgl(mcc_wrb);

	be_wrb_hdr_prepare(mcc_wrb, cmd->size, false, 1,
			OPCODE_COMMON_WRITE_FLASHROM);

	be_cmd_hdr_prepare(&fl_req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
	/* Non-embedded request: the sge points at the caller's DMA buffer */
	sgl->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sgl->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sgl->len = cpu_to_le32(cmd->size);

	fl_req->params.op_type = cpu_to_le32(flash_type);
	fl_req->params.op_code = cpu_to_le32(flash_opcode);
	fl_req->params.data_buf_size = cpu_to_le32(buf_size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1448

1449 1450
/* Read the 4-byte CRC of the redboot image at the given flash offset.
 * Uses synchronous mcc. flashed_crc must point to at least 4 bytes.
 */
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
			 int offset)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	/* +4 bytes of payload to receive the CRC in params.data_buf */
	be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
			OPCODE_COMMON_READ_FLASHROM);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);

	req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
	/* All params fields are little-endian on the wire; offset and
	 * data_buf_size were previously written in host order, which
	 * breaks on big-endian hosts.
	 */
	req->params.offset = cpu_to_le32(offset);
	req->params.data_buf_size = cpu_to_le32(0x4);

	status = be_mcc_notify_wait(adapter);
	if (!status)
		memcpy(flashed_crc, req->params.data_buf, 4);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519

/* Program the adapter with the given MAC for magic-packet wake-on-lan.
 * Uses synchronous mcc with a non-embedded (DMA) request buffer.
 * (Dropped the redundant 'extern' on this definition; linkage is
 * external by default and the declaration lives in the header.)
 */
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
				struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
	memcpy(req->magic_mac, mac, ETH_ALEN);

	/* Non-embedded request: the sge describes the DMA buffer */
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1520

1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555
/* Set or clear a loopback mode on a port. Uses synchronous mcc. */
int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
			u8 loopback_type, u8 enable)
{
	struct be_cmd_req_set_lmode *lmode_req;
	struct be_mcc_wrb *mcc_wrb;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	mcc_wrb = wrb_from_mccq(adapter);
	if (mcc_wrb == NULL) {
		status = -EBUSY;
		goto err;
	}

	lmode_req = embedded_payload(mcc_wrb);

	be_wrb_hdr_prepare(mcc_wrb, sizeof(*lmode_req), true, 0,
				OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);

	be_cmd_hdr_prepare(&lmode_req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
			sizeof(*lmode_req));

	/* Loopback runs from the port back to itself */
	lmode_req->src_port = port_num;
	lmode_req->dest_port = port_num;
	lmode_req->loopback_type = loopback_type;
	lmode_req->loopback_state = enable;

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577
/* Run a fw-driven loopback packet test on a port and return the fw's
 * test status. Uses synchronous mcc.
 */
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
		u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_loopback_test *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
				OPCODE_LOWLEVEL_LOOPBACK_TEST);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
	/* hdr fields are little-endian on the wire like every other
	 * field below; the timeout was previously written in host order.
	 */
	req->hdr.timeout = cpu_to_le32(4);

	req->pattern = cpu_to_le64(pattern);
	req->src_port = cpu_to_le32(port_num);
	req->dest_port = cpu_to_le32(port_num);
	req->pkt_size = cpu_to_le32(pkt_size);
	req->num_pkts = cpu_to_le32(num_pkts);
	req->loopback_type = cpu_to_le32(loopback_type);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
		status = le32_to_cpu(resp->status);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* DMA a repeating byte pattern to adapter DDR and verify the echo.
 * Uses synchronous mcc with a non-embedded (DMA) command buffer.
 */
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
				u32 byte_cnt, struct be_dma_mem *cmd)
{
	struct be_cmd_req_ddrdma_test *dma_req = cmd->va;
	struct be_mcc_wrb *mcc_wrb;
	struct be_sge *sgl;
	int status;
	int i;

	spin_lock_bh(&adapter->mcc_lock);

	mcc_wrb = wrb_from_mccq(adapter);
	if (mcc_wrb == NULL) {
		status = -EBUSY;
		goto err;
	}
	sgl = nonembedded_sgl(mcc_wrb);
	be_wrb_hdr_prepare(mcc_wrb, cmd->size, false, 1,
				OPCODE_LOWLEVEL_HOST_DDR_DMA);
	be_cmd_hdr_prepare(&dma_req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);

	sgl->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sgl->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sgl->len = cpu_to_le32(cmd->size);

	dma_req->pattern = cpu_to_le64(pattern);
	dma_req->byte_count = cpu_to_le32(byte_cnt);
	/* Fill the send buffer by repeating the 8-byte pattern lsb-first */
	for (i = 0; i < byte_cnt; i++)
		dma_req->snd_buff[i] = (u8)(pattern >> ((i % 8) * 8));

	status = be_mcc_notify_wait(adapter);

	if (!status) {
		struct be_cmd_resp_ddrdma_test *resp = cmd->va;

		/* Fail if the echoed data differs or fw flagged a send error */
		if ((memcmp(resp->rcv_buff, dma_req->snd_buff, byte_cnt) != 0) ||
				resp->snd_err) {
			status = -1;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678

extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
				struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_seeprom_read *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_SEEPROM_READ);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SEEPROM_READ, sizeof(*req));

	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mcc_notify_wait(adapter);

	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}