/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
19
#include "be_cmds.h"
S
Sathya Perla 已提交
20

21
static void be_mcc_notify(struct be_adapter *adapter)
22
{
23
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
24 25 26 27
	u32 val = 0;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
28
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
29 30 31 32 33
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
34
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
35 36 37 38 39 40 41 42 43 44 45
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else {
		return false;
	}
}

/* Need to reset the entire word that houses the valid bit */
46
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
47 48 49 50
{
	compl->flags = 0;
}

51
/* Process one MCC completion entry: convert it to host endianness,
 * special-case a successful GET_STATISTICS response (its payload also
 * needs byte-swapping before the netdev stats are refreshed), and warn
 * on any failure status other than NOT_SUPPORTED.
 * Returns the completion status extracted from the CQE.
 */
static int be_mcc_compl_process(struct be_adapter *adapter,
	struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;
	if (compl_status == MCC_STATUS_SUCCESS) {
		if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
			/* Stats response lives in the separate DMA buffer
			 * posted by be_cmd_get_stats() */
			struct be_cmd_resp_get_stats *resp =
						adapter->stats.cmd.va;
			be_dws_le_to_cpu(&resp->hw_stats,
						sizeof(resp->hw_stats));
			netdev_stats_update(adapter);
		}
	} else if (compl_status != MCC_STATUS_NOT_SUPPORTED) {
		/* Real failure: log opcode + base/extended status for debug */
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
				CQE_STATUS_EXTD_MASK;
		dev_warn(&adapter->pdev->dev,
		"Error in cmd completion - opcode %d, compl %d, extd %d\n",
			compl->tag0, compl_status, extd_status);
	}
	return compl_status;
}

80
/* Link state evt is a string of bytes; no need for endian swapping */
81
static void be_async_link_state_process(struct be_adapter *adapter,
82 83
		struct be_async_event_link_state *evt)
{
84 85
	be_link_status_update(adapter,
		evt->port_link_status == ASYNC_EVENT_LINK_UP);
86 87 88 89 90 91 92 93
}

/* True if the async-event trailer carries a LINK_STATE event code */
static inline bool is_link_state_evt(u32 trailer)
{
	u32 evt_code = (trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
			ASYNC_TRAILER_EVENT_CODE_MASK;

	return evt_code == ASYNC_EVENT_CODE_LINK_STATE;
}
94

95
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
96
{
97
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
98
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
99 100 101 102 103 104 105 106

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

107 108 109 110 111 112 113 114 115 116 117 118 119 120 121
/* Enable async MCC processing: notify the CQ (presumably re-arming it —
 * see be_cq_notify) and allow the completion path to keep re-arming.
 * mcc_cq_lock serializes this against concurrent MCC CQ processing.
 */
void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

/* Stop the MCC completion path from re-arming the CQ */
void be_async_mcc_disable(struct be_adapter *adapter)
{
	adapter->mcc_obj.rearm_cq = false;
}

S
Sathya Perla 已提交
122
/* Drain all pending MCC completions under mcc_cq_lock.
 * Async entries (link-state events) are dispatched to the event handler;
 * command completions update *status and release one WRB slot each.
 * Returns the number of CQ entries consumed; *status holds the status of
 * the last command completion processed (untouched if none were seen).
 */
int be_process_mcc(struct be_adapter *adapter, int *status)
{
	struct be_mcc_compl *compl;
	int num = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock_bh(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			BUG_ON(!is_link_state_evt(compl->flags));

			/* Interpret compl as a async link evt */
			be_async_link_state_process(adapter,
				(struct be_async_event_link_state *) compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
				*status = be_mcc_compl_process(adapter, compl);
				/* one fewer WRB outstanding on the MCCQ */
				atomic_dec(&mcc_obj->q.used);
		}
		/* clear the valid word so this entry isn't seen again */
		be_mcc_compl_use(compl);
		num++;
	}

	spin_unlock_bh(&adapter->mcc_cq_lock);
	return num;
}

149
/* Wait till no more pending mcc requests are present.
 * Polls be_process_mcc() every 100us (mcc_timeout iterations == 12s),
 * notifying the CQ for any entries consumed, until the MCCQ used-count
 * drains to zero. Returns the last command's completion status, or -1
 * on poll timeout.
 */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout		120000 /* 12s timeout */
	int i, num, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		num = be_process_mcc(adapter, &status);
		if (num)
			be_cq_notify(adapter, mcc_obj->cq.id,
				mcc_obj->rearm_cq, num);

		/* all posted WRBs have completed */
		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
		return -1;
	}
	return status;
}

/* Kick the MCC doorbell for the posted WRB(s), then block until the
 * queue drains; returns the completion status from the wait.
 */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	int status;

	be_mcc_notify(adapter);
	status = be_mcc_wait_compl(adapter);

	return status;
}

180
/* Poll the mailbox doorbell until its ready bit is set.
 * An all-ones read means the PCI device has dropped off the bus.
 * Starts with a 5us delay per iteration and backs off to 200us after
 * the first ~50us; gives up after roughly 4 seconds of accumulated wait.
 * Returns 0 when ready, -1 on disconnect or timeout.
 */
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int cnt = 0, wait = 5;
	u32 ready;

	do {
		ready = ioread32(db);
		if (ready == 0xffffffff) {
			/* reads of a surprise-removed device return ~0 */
			dev_err(&adapter->pdev->dev,
				"pci slot disconnected\n");
			return -1;
		}

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (cnt > 4000000) {
			dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
			return -1;
		}

		if (cnt > 50)
			wait = 200;	/* back off after initial fast polls */
		cnt += wait;
		udelay(wait);
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 *
 * The hw protocol requires: wait-ready, write the high dma-address word
 * (with the HI flag), wait-ready, write the low dma-address word,
 * wait-ready, then consume the completion the fw placed in the mailbox.
 * Caller holds mbox_lock. Returns 0 on success, the completion status on
 * command failure, or -1 if no valid completion appeared.
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

261
/* Read the POST stage from the EP semaphore register into *stage.
 * Returns -1 if the register's error bit is set, 0 otherwise.
 */
static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
	u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
	bool post_err = (sem >> EP_SEMAPHORE_POST_ERR_SHIFT) &
			EP_SEMAPHORE_POST_ERR_MASK;

	*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;

	return post_err ? -1 : 0;
}

272
int be_cmd_POST(struct be_adapter *adapter)
S
Sathya Perla 已提交
273
{
274 275
	u16 stage;
	int status, timeout = 0;
S
Sathya Perla 已提交
276

277 278 279 280 281 282 283 284 285 286 287 288 289 290
	do {
		status = be_POST_stage_get(adapter, &stage);
		if (status) {
			dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
				stage);
			return -1;
		} else if (stage != POST_STAGE_ARMFW_RDY) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(2 * HZ);
			timeout += 2;
		} else {
			return 0;
		}
	} while (timeout < 20);
S
Sathya Perla 已提交
291

292 293
	dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
S
Sathya Perla 已提交
294 295 296 297 298 299 300 301 302 303 304 305 306 307
}

/* Payload area for commands embedded directly in the WRB */
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

/* First scatter-gather element, used when the command payload lives in a
 * separate DMA buffer rather than in the WRB itself */
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

/* Don't touch the hdr after it's prepared */
/* Fill the WRB header: embedded flag or SGE count, payload length, and the
 * opcode stashed in tag0 (later read back by be_mcc_compl_process).
 * The final be_dws_cpu_to_le converts the 8-byte header to little endian,
 * so no field may be modified afterwards.
 */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
				bool embedded, u8 sge_cnt, u32 opcode)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	/* tag0 is opaque to fw; the driver uses it to identify the opcode */
	wrb->tag0 = opcode;
	be_dws_cpu_to_le(wrb, 8);
}

/* Fill the common request header shared by all fw commands; the header
 * must not be modified after this call. request_length excludes the
 * header itself.
 */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->version = 0;
	req_hdr->subsystem = subsystem;
	req_hdr->opcode = opcode;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
}

/* Fill a command's page-address array with the 4K pages spanned by the
 * given DMA region, capped at max_pages. Addresses are split into
 * little-endian lo/hi halves as the fw expects.
 */
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
			struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

/* Converts an interrupt delay in microseconds to the EQ delay-multiplier
 * value the hw expects. 0 usec maps to multiplier 0; the lowest possible
 * interrupt rate maps to the maximum multiplier of 1023.
 */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE			651042
	const u32 round = 10;
	u32 interrupt_rate, multiplier;

	if (usec_delay == 0)
		return 0;

	interrupt_rate = 1000000 / usec_delay;
	/* Max delay, corresponding to the lowest interrupt rate */
	if (interrupt_rate == 0)
		return 1023;

	multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
	multiplier /= interrupt_rate;
	/* Round the multiplier to the closest value */
	multiplier = (multiplier + round / 2) / round;

	return min(multiplier, (u32)1023);
}

368
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
S
Sathya Perla 已提交
369
{
370 371 372 373 374
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
S
Sathya Perla 已提交
375 376
}

377
/* Allocate (and zero) the next free WRB slot on the MCC queue.
 * Returns NULL when the queue is full; callers translate that to -EBUSY.
 * The used counter is decremented again when the completion is reaped in
 * be_process_mcc().
 */
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (atomic_read(&mccq->used) >= mccq->len) {
		dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
		return NULL;
	}

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427
/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	static const u8 init_pattern[8] = {
		0xFF, 0x12, 0x34, 0xFF, 0xFF, 0x56, 0x78, 0xFF
	};
	u8 *wrb;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = (u8 *)wrb_from_mbox(adapter);
	memcpy(wrb, init_pattern, sizeof(init_pattern));

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

428 429 430
	if (adapter->eeh_err)
		return -EIO;

431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447
	spin_lock(&adapter->mbox_lock);

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);
	return status;
}
448
/* Create an event queue in fw; uses the mbox.
 * On success records the fw-assigned id in eq->id and marks it created.
 * eq_delay (usecs) is converted to the hw delay multiplier.
 */
int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe*/
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

490
/* Uses mbox */
/* Query a MAC address from fw. With permanent=true the factory MAC is
 * returned; otherwise the MAC currently assigned to if_handle.
 * On success the 6-byte address is copied into mac_addr.
 */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			u8 type, bool permanent, u32 if_handle)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_MAC_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
		/* query the MAC bound to this interface instead */
		req->if_id = cpu_to_le16((u16) if_handle);
		req->permanent = 0;
	}

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

527
/* Uses synchronous MCCQ */
528
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
S
Sathya Perla 已提交
529 530
		u32 if_id, u32 *pmac_id)
{
531 532
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
S
Sathya Perla 已提交
533 534
	int status;

535 536 537
	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
538 539 540 541
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
542
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
543

544 545
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_ADD);
S
Sathya Perla 已提交
546 547 548 549 550 551 552

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

553
	status = be_mcc_notify_wait(adapter);
S
Sathya Perla 已提交
554 555 556 557 558
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

559
err:
560
	spin_unlock_bh(&adapter->mcc_lock);
S
Sathya Perla 已提交
561 562 563
	return status;
}

564
/* Uses synchronous MCCQ */
565
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
S
Sathya Perla 已提交
566
{
567 568
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
S
Sathya Perla 已提交
569 570
	int status;

571 572 573
	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
574 575 576 577
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
578
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
579

580 581
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_DEL);
S
Sathya Perla 已提交
582 583 584 585 586 587 588

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

589 590
	status = be_mcc_notify_wait(adapter);

591
err:
592
	spin_unlock_bh(&adapter->mcc_lock);
S
Sathya Perla 已提交
593 594 595
	return status;
}

596
/* Uses Mbox */
597
int be_cmd_cq_create(struct be_adapter *adapter,
S
Sathya Perla 已提交
598 599 600
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
601 602
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
S
Sathya Perla 已提交
603
	struct be_dma_mem *q_mem = &cq->dma_mem;
604
	void *ctxt;
S
Sathya Perla 已提交
605 606
	int status;

607
	spin_lock(&adapter->mbox_lock);
608 609 610 611

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;
S
Sathya Perla 已提交
612

613 614
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_CQ_CREATE);
S
Sathya Perla 已提交
615 616 617 618 619 620 621 622 623 624 625 626 627 628

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
	AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
	AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
			__ilog2_u32(cq->len/256));
	AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
629
	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
S
Sathya Perla 已提交
630 631 632 633
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

634
	status = be_mbox_notify_wait(adapter);
S
Sathya Perla 已提交
635
	if (!status) {
636
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
S
Sathya Perla 已提交
637 638 639
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}
640

641
	spin_unlock(&adapter->mbox_lock);
642 643 644 645 646 647 648 649 650 651 652 653

	return status;
}

/* Encode a queue length for the hw ring_size field: log2(len) + 1, with
 * the maximum encoding (16) wrapping to 0.
 */
static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */

	return (len_encoded == 16) ? 0 : len_encoded;
}

654
int be_cmd_mccq_create(struct be_adapter *adapter,
655 656 657
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
658 659
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
660
	struct be_dma_mem *q_mem = &mccq->dma_mem;
661
	void *ctxt;
662 663
	int status;

664
	spin_lock(&adapter->mbox_lock);
665 666 667 668

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;
669

670 671
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MCC_CREATE);
672 673 674 675 676 677 678 679 680 681 682 683 684 685 686

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

687
	status = be_mbox_notify_wait(adapter);
688 689 690 691 692
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
693
	spin_unlock(&adapter->mbox_lock);
S
Sathya Perla 已提交
694 695 696 697

	return status;
}

698
int be_cmd_txq_create(struct be_adapter *adapter,
S
Sathya Perla 已提交
699 700 701
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
702 703
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_tx_create *req;
S
Sathya Perla 已提交
704
	struct be_dma_mem *q_mem = &txq->dma_mem;
705
	void *ctxt;
S
Sathya Perla 已提交
706 707
	int status;

708
	spin_lock(&adapter->mbox_lock);
709 710 711 712

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;
S
Sathya Perla 已提交
713

714 715
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_TX_CREATE);
S
Sathya Perla 已提交
716 717 718 719 720 721 722 723

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
		sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

724 725
	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
		be_encoded_q_len(txq->len));
S
Sathya Perla 已提交
726 727 728 729 730 731 732
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

733
	status = be_mbox_notify_wait(adapter);
S
Sathya Perla 已提交
734 735 736 737 738
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}
739

740
	spin_unlock(&adapter->mbox_lock);
S
Sathya Perla 已提交
741 742 743 744

	return status;
}

745
/* Uses mbox */
746
int be_cmd_rxq_create(struct be_adapter *adapter,
S
Sathya Perla 已提交
747 748 749
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u16 max_frame_size, u32 if_id, u32 rss)
{
750 751
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
S
Sathya Perla 已提交
752 753 754
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

755
	spin_lock(&adapter->mbox_lock);
756 757 758

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
759

760 761
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_RX_CREATE);
S
Sathya Perla 已提交
762 763 764 765 766 767 768 769 770 771 772 773

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
		sizeof(*req));

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(max_frame_size);
	req->rss_queue = cpu_to_le32(rss);

774
	status = be_mbox_notify_wait(adapter);
S
Sathya Perla 已提交
775 776 777 778 779
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
	}
780

781
	spin_unlock(&adapter->mbox_lock);
S
Sathya Perla 已提交
782 783 784 785

	return status;
}

786 787 788
/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
789
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
S
Sathya Perla 已提交
790 791
		int queue_type)
{
792 793
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
S
Sathya Perla 已提交
794 795 796
	u8 subsys = 0, opcode = 0;
	int status;

797 798 799
	if (adapter->eeh_err)
		return -EIO;

800
	spin_lock(&adapter->mbox_lock);
S
Sathya Perla 已提交
801

802 803 804
	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

S
Sathya Perla 已提交
805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821
	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
822 823 824 825
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
S
Sathya Perla 已提交
826
	default:
827
		BUG();
S
Sathya Perla 已提交
828
	}
829 830 831

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);

S
Sathya Perla 已提交
832 833 834
	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	req->id = cpu_to_le16(q->id);

835
	status = be_mbox_notify_wait(adapter);
836

837
	spin_unlock(&adapter->mbox_lock);
S
Sathya Perla 已提交
838 839 840 841

	return status;
}

842 843 844
/* Create an rx filtering policy configuration on an i/f
 * Uses mbox
 */
845 846
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
S
Sathya Perla 已提交
847
{
848 849
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_create *req;
S
Sathya Perla 已提交
850 851
	int status;

852
	spin_lock(&adapter->mbox_lock);
853 854 855

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
856

857 858
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_INTERFACE_CREATE);
S
Sathya Perla 已提交
859 860 861 862

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

863 864
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
865
	req->pmac_invalid = pmac_invalid;
S
Sathya Perla 已提交
866 867 868
	if (!pmac_invalid)
		memcpy(req->mac_addr, mac, ETH_ALEN);

869
	status = be_mbox_notify_wait(adapter);
S
Sathya Perla 已提交
870 871 872 873 874 875 876
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
		if (!pmac_invalid)
			*pmac_id = le32_to_cpu(resp->pmac_id);
	}

877
	spin_unlock(&adapter->mbox_lock);
S
Sathya Perla 已提交
878 879 880
	return status;
}

881
/* Uses mbox */
882
int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
S
Sathya Perla 已提交
883
{
884 885
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
S
Sathya Perla 已提交
886 887
	int status;

888 889 890
	if (adapter->eeh_err)
		return -EIO;

891
	spin_lock(&adapter->mbox_lock);
892 893 894

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
895

896 897
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_INTERFACE_DESTROY);
S
Sathya Perla 已提交
898 899 900 901 902

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));

	req->interface_id = cpu_to_le32(interface_id);
903 904

	status = be_mbox_notify_wait(adapter);
S
Sathya Perla 已提交
905

906
	spin_unlock(&adapter->mbox_lock);
S
Sathya Perla 已提交
907 908 909 910 911 912

	return status;
}

/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 *
 * Fire-and-forget: the command is only posted (be_mcc_notify), not waited
 * on. The response is picked up later in be_mcc_compl_process(), which
 * special-cases OPCODE_ETH_GET_STATISTICS and refreshes netdev stats.
 * Returns 0 once posted, or -EBUSY if no MCCQ WRB is available.
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_stats *req;
	struct be_sge *sge;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_ETH_GET_STATISTICS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, sizeof(*req));
	/* point the single SGE at the caller's DMA buffer */
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

948
/* Uses synchronous mcc */
949
int be_cmd_link_status_query(struct be_adapter *adapter,
950
			bool *link_up, u8 *mac_speed, u16 *link_speed)
S
Sathya Perla 已提交
951
{
952 953
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
S
Sathya Perla 已提交
954 955
	int status;

956 957 958
	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
959 960 961 962
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
963
	req = embedded_payload(wrb);
964 965

	*link_up = false;
S
Sathya Perla 已提交
966

967 968
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);
S
Sathya Perla 已提交
969 970 971 972

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

973
	status = be_mcc_notify_wait(adapter);
S
Sathya Perla 已提交
974 975
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
976
		if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
977
			*link_up = true;
978 979 980
			*link_speed = le16_to_cpu(resp->link_speed);
			*mac_speed = resp->mac_speed;
		}
S
Sathya Perla 已提交
981 982
	}

983
err:
984
	spin_unlock_bh(&adapter->mcc_lock);
S
Sathya Perla 已提交
985 986 987
	return status;
}

988
/* Uses Mbox */
989
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
S
Sathya Perla 已提交
990
{
991 992
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
S
Sathya Perla 已提交
993 994
	int status;

995
	spin_lock(&adapter->mbox_lock);
996 997 998

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
999

1000 1001
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_FW_VERSION);
S
Sathya Perla 已提交
1002 1003 1004 1005

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));

1006
	status = be_mbox_notify_wait(adapter);
S
Sathya Perla 已提交
1007 1008 1009 1010 1011
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
		strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
	}

1012
	spin_unlock(&adapter->mbox_lock);
S
Sathya Perla 已提交
1013 1014 1015
	return status;
}

1016 1017 1018
/* set the EQ delay interval of an EQ to specified value
 * Uses async mcc
 */
1019
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
S
Sathya Perla 已提交
1020
{
1021 1022
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
1023
	int status = 0;
S
Sathya Perla 已提交
1024

1025 1026 1027
	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
1028 1029 1030 1031
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
1032
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
1033

1034 1035
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MODIFY_EQ_DELAY);
S
Sathya Perla 已提交
1036 1037 1038 1039 1040 1041 1042 1043 1044

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

	req->num_eq = cpu_to_le32(1);
	req->delay[0].eq_id = cpu_to_le32(eq_id);
	req->delay[0].phase = 0;
	req->delay[0].delay_multiplier = cpu_to_le32(eqd);

1045
	be_mcc_notify(adapter);
S
Sathya Perla 已提交
1046

1047
err:
1048
	spin_unlock_bh(&adapter->mcc_lock);
1049
	return status;
S
Sathya Perla 已提交
1050 1051
}

1052
/* Uses synchronous mcc.
 * Program the VLAN filter settings of an interface.
 * @if_id:       interface handle to configure
 * @vtag_array:  VLAN tags to allow (unused when @promiscuous is true)
 * @num:         number of entries in @vtag_array
 * @untagged:    whether untagged frames are accepted
 * @promiscuous: when true, accept all VLANs and skip the tag array
 * Returns 0 on success, -EBUSY if no MCC WRB was free, else the
 * firmware status code.
 */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
			u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_VLAN_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		/* NOTE(review): tags are copied as-is; presumably the caller
		 * supplies them in the byte order firmware expects — confirm. */
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

1091 1092 1093
/* Uses MCC for this command as it may be called in BH context
 * Uses synchronous mcc.
 * Enable/disable promiscuous mode on one of the adapter's ports.
 * @port_num: selects which port's flag is written (0 => port0)
 * @en:       new promiscuous state
 * Returns 0 on success, -EBUSY if no MCC WRB was free, else the
 * firmware status code.
 */
int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_promiscuous_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_PROMISCUOUS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_PROMISCUOUS, sizeof(*req));

	/* The request carries a separate flag per port */
	if (port_num)
		req->port1_promiscuous = en;
	else
		req->port0_promiscuous = en;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

1126
/*
1127
 * Uses MCC for this command as it may be called in BH context
1128 1129
 * (mc == NULL) => multicast promiscous
 */
1130
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
1131
		struct net_device *netdev, struct be_dma_mem *mem)
S
Sathya Perla 已提交
1132
{
1133
	struct be_mcc_wrb *wrb;
1134 1135 1136
	struct be_cmd_req_mcast_mac_config *req = mem->va;
	struct be_sge *sge;
	int status;
S
Sathya Perla 已提交
1137

1138
	spin_lock_bh(&adapter->mcc_lock);
1139

1140
	wrb = wrb_from_mccq(adapter);
1141 1142 1143 1144
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
1145 1146
	sge = nonembedded_sgl(wrb);
	memset(req, 0, sizeof(*req));
S
Sathya Perla 已提交
1147

1148 1149
	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_NTWK_MULTICAST_SET);
1150 1151 1152
	sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
	sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(mem->size);
S
Sathya Perla 已提交
1153 1154 1155 1156 1157

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

	req->interface_id = if_id;
1158
	if (netdev) {
1159 1160 1161
		int i;
		struct dev_mc_list *mc;

1162
		req->num_mac = cpu_to_le16(netdev_mc_count(netdev));
1163

1164 1165
		i = 0;
		netdev_for_each_mc_addr(mc, netdev)
1166 1167 1168
			memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
	} else {
		req->promiscuous = 1;
S
Sathya Perla 已提交
1169 1170
	}

1171
	status = be_mcc_notify_wait(adapter);
S
Sathya Perla 已提交
1172

1173
err:
1174
	spin_unlock_bh(&adapter->mcc_lock);
1175
	return status;
S
Sathya Perla 已提交
1176 1177
}

1178
/* Uses synchronous mcc.
 * Set the adapter's TX/RX flow-control (pause) configuration.
 * @tx_fc/@rx_fc: non-zero enables flow control in that direction.
 * Returns 0 on success, -EBUSY if no MCC WRB was free, else the
 * firmware status code.
 */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_SET_FLOW_CONTROL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));

	/* Request fields are 16-bit little-endian */
	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

1210
/* Uses sync mcc.
 * Query the adapter's current TX/RX flow-control (pause) configuration.
 * @tx_fc/@rx_fc: outputs; written only when the command succeeds.
 * Returns 0 on success, -EBUSY if no MCC WRB was free, else the
 * firmware status code.
 */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_FLOW_CONTROL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		/* Response overlays the same embedded payload as the request */
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

1245
/* Uses mbox.
 * Query the firmware configuration of this function.
 * @port_num: out; physical port number of this PCI function.
 * @cap:      out; function capability flags.
 * Outputs are written only when the command succeeds.
 * Returns 0 on success, else the mailbox/firmware status code.
 */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
		*port_num = le32_to_cpu(resp->phys_port);
		*cap = le32_to_cpu(resp->function_cap);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}
1273

1274
/* Uses mbox */
1275 1276
int be_cmd_reset_function(struct be_adapter *adapter)
{
1277 1278
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;
1279 1280 1281 1282
	int status;

	spin_lock(&adapter->mbox_lock);

1283 1284
	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
1285

1286 1287
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_FUNCTION_RESET);
1288 1289 1290 1291

	be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));

1292
	status = be_mbox_notify_wait(adapter);
1293 1294 1295 1296

	spin_unlock(&adapter->mbox_lock);
	return status;
}
1297

1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308
/* Uses sync mcc.
 * Control the beacon LED of a port (used to physically identify an
 * adapter, e.g. from ethtool).
 * @bcn:   beacon duration; @sts: status duration; @state: new beacon
 *         state. Values are passed through to firmware untranslated.
 * Returns 0 on success, -EBUSY if no MCC WRB was free, else the
 * firmware status code.
 */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
			u8 bcn, u8 sts, u8 state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_enable_disable_beacon *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_ENABLE_DISABLE_BEACON);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));

	req->port_num = port_num;
	req->beacon_state = state;
	req->beacon_duration = bcn;
	req->status_duration = sts;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc.
 * Query the current beacon LED state of a port.
 * @state: out; written only when the command succeeds.
 *         NOTE(review): copied without endian conversion — presumably a
 *         single-byte field; confirm against the response layout.
 * Returns 0 on success, -EBUSY if no MCC WRB was free, else the
 * firmware status code.
 */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_beacon_state *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_BEACON_STATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));

	req->port_num = port_num;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_beacon_state *resp =
						embedded_payload(wrb);
		*state = resp->beacon_state;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379
/* Uses sync mcc.
 * Read the transceiver/connector type of @port via the
 * READ_TRANSRECV_DATA command (module data page A0).
 * @connector: out; raw connector code from the response; written only
 *             on success.
 * NOTE(review): the WRB is sized for the response struct while the cmd
 * header uses sizeof(*req) — presumably the response is larger and the
 * payload must accommodate it; confirm against the firmware spec.
 */
int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
				u8 *connector)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_port_type *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0,
			OPCODE_COMMON_READ_TRANSRECV_DATA);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));

	req->port = cpu_to_le32(port);
	req->page_num = cpu_to_le32(TR_PAGE_A0);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
			*connector = resp->data.connector;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

1405 1406 1407
/* Write a chunk of a firmware image to the adapter's flash.
 * Uses synchronous MCC with a non-embedded request: @cmd holds the
 * request header plus @buf_size bytes of image data.
 * @flash_type:   flash region selector, passed through to firmware.
 * @flash_opcode: flash operation selector, passed through to firmware.
 * Returns 0 on success, -EBUSY if no MCC WRB was free, else the
 * firmware status code.
 */
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			u32 flash_type, u32 flash_opcode, u32 buf_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = cmd->va;
	sge = nonembedded_sgl(wrb);

	/* Non-embedded: entire request (header + data) is in @cmd's buffer */
	be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
			OPCODE_COMMON_WRITE_FLASHROM);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	req->params.op_type = cpu_to_le32(flash_type);
	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1442

1443 1444
/* Read the 4-byte CRC of the flashed redboot image at @offset.
 * Uses synchronous MCC; the command is embedded, sized sizeof(*req)+4
 * to leave room for the 4 CRC bytes returned in params.data_buf.
 * @flashed_crc: out; receives the 4 CRC bytes on success.
 * Returns 0 on success, -EBUSY if no MCC WRB was free, else the
 * firmware status code.
 */
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
			 int offset)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
			OPCODE_COMMON_READ_FLASHROM);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);

	req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
	/* Fix: offset and data_buf_size were assigned in host byte order
	 * while the sibling params fields are converted with cpu_to_le32 —
	 * broken on big-endian hosts.
	 */
	req->params.offset = cpu_to_le32(offset);
	req->params.data_buf_size = cpu_to_le32(0x4);

	status = be_mcc_notify_wait(adapter);
	if (!status)
		memcpy(flashed_crc, req->params.data_buf, 4);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513

/* Program the MAC address on which firmware matches magic (Wake-on-LAN)
 * packets. Uses synchronous MCC with a non-embedded request held in
 * @nonemb_cmd.
 * (Dropped the stray 'extern' specifier: it is meaningless on a
 * function definition and suggests a declaration.)
 * Returns 0 on success, -EBUSY if no MCC WRB was free, else the
 * firmware status code.
 */
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
				struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
	memcpy(req->magic_mac, mac, ETH_ALEN);

	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1514

1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549
/* Enable or disable a loopback mode on a port.
 * Uses synchronous MCC; the port is looped back onto itself.
 * @loopback_type/@enable: passed through to firmware untranslated.
 * Returns 0 on success, -EBUSY if no MCC WRB was free, else the
 * firmware status code.
 */
int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
			u8 loopback_type, u8 enable)
{
	struct be_cmd_req_set_lmode *lmode_req;
	struct be_mcc_wrb *mcc_wrb;
	int rc;

	spin_lock_bh(&adapter->mcc_lock);

	mcc_wrb = wrb_from_mccq(adapter);
	if (mcc_wrb == NULL) {
		rc = -EBUSY;
		goto unlock;
	}

	lmode_req = embedded_payload(mcc_wrb);

	be_wrb_hdr_prepare(mcc_wrb, sizeof(*lmode_req), true, 0,
				OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);

	be_cmd_hdr_prepare(&lmode_req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
			sizeof(*lmode_req));

	/* Source and destination are the same port */
	lmode_req->src_port = port_num;
	lmode_req->dest_port = port_num;
	lmode_req->loopback_type = loopback_type;
	lmode_req->loopback_state = enable;

	rc = be_mcc_notify_wait(adapter);
unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return rc;
}

1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571
/* Run a firmware-driven loopback packet test on @port_num.
 * Uses synchronous MCC. @pattern is the payload pattern, @pkt_size and
 * @num_pkts size the test, @loopback_type selects the loopback mode.
 * Returns: -EBUSY if no MCC WRB was free, the command status on
 * failure, otherwise the test status reported in the response (0 means
 * the test passed).
 */
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
		u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_loopback_test *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
				OPCODE_LOWLEVEL_LOOPBACK_TEST);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
	/* Longer completion timeout for the test command.
	 * NOTE(review): assigned without endian conversion — presumably a
	 * single-byte header field; confirm against the header layout. */
	req->hdr.timeout = 4;

	req->pattern = cpu_to_le64(pattern);
	req->src_port = cpu_to_le32(port_num);
	req->dest_port = cpu_to_le32(port_num);
	req->pkt_size = cpu_to_le32(pkt_size);
	req->num_pkts = cpu_to_le32(num_pkts);
	req->loopback_type = cpu_to_le32(loopback_type);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
		status = le32_to_cpu(resp->status);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* DMA a test pattern to the adapter's DDR and back, then verify it.
 * Uses synchronous MCC with a non-embedded request in @cmd; the buffer
 * holds both the outgoing (snd_buff) and returned (rcv_buff) data.
 * @pattern:  64-bit pattern replicated byte-wise over the send buffer.
 * @byte_cnt: number of pattern bytes to transfer.
 * Returns 0 if the round-tripped data matches, -1 on mismatch or send
 * error, -EBUSY if no MCC WRB was free, else the firmware status code.
 */
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
				u32 byte_cnt, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_ddrdma_test *req;
	struct be_sge *sge;
	int status;
	int i, j = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = cmd->va;
	sge = nonembedded_sgl(wrb);
	be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
				OPCODE_LOWLEVEL_HOST_DDR_DMA);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);

	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	req->pattern = cpu_to_le64(pattern);
	req->byte_count = cpu_to_le32(byte_cnt);
	/* Fill the send buffer by cycling through the 8 bytes of @pattern,
	 * low byte first (j counts 0..7 and wraps). */
	for (i = 0; i < byte_cnt; i++) {
		req->snd_buff[i] = (u8)(pattern >> (j*8));
		j++;
		if (j > 7)
			j = 0;
	}

	status = be_mcc_notify_wait(adapter);

	if (!status) {
		/* Response overlays the same DMA buffer as the request */
		struct be_cmd_resp_ddrdma_test *resp;
		resp = cmd->va;
		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
				resp->snd_err) {
			status = -1;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672

extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
				struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_seeprom_read *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_SEEPROM_READ);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SEEPROM_READ, sizeof(*req));

	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mcc_notify_wait(adapter);

	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}