be_cmds.c 45.5 KB
Newer Older
S
Sathya Perla 已提交
1
/*
A
Ajit Khaparde 已提交
2
 * Copyright (C) 2005 - 2010 ServerEngines
S
Sathya Perla 已提交
3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
19
#include "be_cmds.h"
S
Sathya Perla 已提交
20

21
/* Ring the MCC queue doorbell to tell the controller that one new
 * work-request entry has been posted on the MCC queue.
 * Bails out (with a message) when an EEH/PCI error has been detected. */
static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	if (adapter->eeh_err) {
		dev_info(&adapter->pdev->dev,
			"Error in Card Detected! Cannot issue commands\n");
		return;
	}

	/* Doorbell word: ring id in the low bits, count of newly posted
	 * entries (one here) in the NUM_POSTED field. */
	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	/* Make sure the WRB memory writes are visible to the device
	 * before the doorbell write. */
	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	if (compl->flags != 0) {
		/* New entry: convert flags to host endian in place (done
		 * exactly once) and insist the valid bit is actually set. */
		compl->flags = le32_to_cpu(compl->flags);
		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else {
		return false;
	}
}

/* Need to reset the entire word that houses the valid bit */
/* Marks the completion entry as consumed so that, when the ring wraps,
 * be_mcc_compl_is_new() will not see it as a new entry. */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

59
/* Process one MCC completion entry: swap it to host endian, extract the
 * completion status, wake any FLASHROM waiter, swap in GET_STATISTICS
 * results, and warn on unexpected errors.
 * Returns the base completion status code from the CQE. */
static int be_mcc_compl_process(struct be_adapter *adapter,
	struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;

	/* FLASHROM writes complete via the MCC queue; report status and
	 * wake the thread sleeping on flash_compl. */
	if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) &&
		(compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
		adapter->flash_status = compl_status;
		complete(&adapter->flash_compl);
	}

	if (compl_status == MCC_STATUS_SUCCESS) {
		if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
			/* Stats response lands in the pre-allocated DMA
			 * buffer; swap it and push into netdev counters. */
			struct be_cmd_resp_get_stats *resp =
						adapter->stats_cmd.va;
			be_dws_le_to_cpu(&resp->hw_stats,
						sizeof(resp->hw_stats));
			netdev_stats_update(adapter);
			adapter->stats_ioctl_sent = false;
		}
	} else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) &&
		   (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) {
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
				CQE_STATUS_EXTD_MASK;
		dev_warn(&adapter->pdev->dev,
		"Error in cmd completion - opcode %d, compl %d, extd %d\n",
			compl->tag0, compl_status, extd_status);
	}
	return compl_status;
}

97
/* Link state evt is a string of bytes; no need for endian swapping */
98
static void be_async_link_state_process(struct be_adapter *adapter,
99 100
		struct be_async_event_link_state *evt)
{
101 102
	be_link_status_update(adapter,
		evt->port_link_status == ASYNC_EVENT_LINK_UP);
103 104
}

105 106 107 108 109 110
/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
		struct be_async_event_grp5_cos_priority *evt)
{
	if (evt->valid) {
		adapter->vlan_prio_bmap = evt->available_priority_bmap;
111
		adapter->recommended_prio &= ~VLAN_PRIO_MASK;
112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149
		adapter->recommended_prio =
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
	}
}

/* Grp5 QOS Speed evt */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
		struct be_async_event_grp5_qos_link_speed *evt)
{
	if (evt->physical_port != adapter->port_num)
		return;

	/* qos_link_speed is in units of 10 Mbps */
	adapter->link_speed = evt->qos_link_speed * 10;
}

/* Dispatch a group-5 async event to its type-specific handler based on
 * the event-type field of the trailer word. */
static void be_async_grp5_evt_process(struct be_adapter *adapter,
		u32 trailer, struct be_mcc_compl *evt)
{
	u8 evt_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
			ASYNC_TRAILER_EVENT_TYPE_MASK;

	switch (evt_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter,
			(struct be_async_event_grp5_cos_priority *)evt);
		break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter,
			(struct be_async_event_grp5_qos_link_speed *)evt);
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
		break;
	}
}

150 151
/* True when the async-event trailer carries a link-state event code */
static inline bool is_link_state_evt(u32 trailer)
{
	u32 code = (trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
			ASYNC_TRAILER_EVENT_CODE_MASK;

	return code == ASYNC_EVENT_CODE_LINK_STATE;
}
156

157 158 159 160 161 162 163
/* True when the async-event trailer carries a group-5 event code */
static inline bool is_grp5_evt(u32 trailer)
{
	u32 code = (trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
			ASYNC_TRAILER_EVENT_CODE_MASK;

	return code == ASYNC_EVENT_CODE_GRP_5;
}

164
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
165
{
166
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
167
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
168 169 170 171 172 173 174 175

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

176 177 178 179 180 181 182 183 184 185 186 187 188 189 190
/* Arm the MCC CQ and set the rearm flag; taken under mcc_cq_lock, the
 * same lock be_process_mcc() holds while draining the CQ. */
void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

/* Clear the rearm flag; subsequent be_cq_notify() calls that take this
 * flag (see be_mcc_wait_compl) will then not re-arm the MCC CQ. */
void be_async_mcc_disable(struct be_adapter *adapter)
{
	adapter->mcc_obj.rearm_cq = false;
}

S
Sathya Perla 已提交
191
/* Drain the MCC completion queue: route async events (link state, grp5)
 * to their handlers and process command completions, recording the last
 * command's status in *status.
 * Returns the number of CQ entries consumed. */
int be_process_mcc(struct be_adapter *adapter, int *status)
{
	struct be_mcc_compl *compl;
	int num = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock_bh(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				be_async_link_state_process(adapter,
				(struct be_async_event_link_state *) compl);
			else if (is_grp5_evt(compl->flags))
				be_async_grp5_evt_process(adapter,
				compl->flags, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
				/* One fewer outstanding WRB on the MCC queue */
				*status = be_mcc_compl_process(adapter, compl);
				atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	spin_unlock_bh(&adapter->mcc_cq_lock);
	return num;
}

219
/* Wait till no more pending mcc requests are present */
/* Polls be_process_mcc() (up to ~12s in 100us steps) until the MCC queue
 * drains, notifying the CQ for every batch consumed.
 * Returns the last command's completion status, -EIO on a detected EEH
 * error, or -1 on poll timeout. */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout		120000 /* 12s timeout */
	int i, num, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	if (adapter->eeh_err)
		return -EIO;

	for (i = 0; i < mcc_timeout; i++) {
		num = be_process_mcc(adapter, &status);
		if (num)
			be_cq_notify(adapter, mcc_obj->cq.id,
				mcc_obj->rearm_cq, num);

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
		return -1;
	}
	return status;
}

/* Notify MCC requests and wait for completion */
/* Rings the MCC doorbell for the posted WRB(s) and synchronously waits
 * for the queue to drain; returns the completion status. */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	be_mcc_notify(adapter);
	return be_mcc_wait_compl(adapter);
}

253
/* Poll the mailbox doorbell register until its ready bit is set.
 * Returns 0 on ready, -EIO when an EEH error was already flagged, and -1
 * on PCI slot disconnect (all-ones read) or after ~4s of polling. */
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	if (adapter->eeh_err) {
		dev_err(&adapter->pdev->dev,
			"Error detected in card.Cannot issue commands\n");
		return -EIO;
	}

	do {
		ready = ioread32(db);
		/* All-ones from MMIO indicates the device dropped off
		 * the bus. */
		if (ready == 0xffffffff) {
			dev_err(&adapter->pdev->dev,
				"pci slot disconnected\n");
			return -1;
		}

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
			/* Dump UE (unrecoverable error) registers for
			 * post-mortem before giving up. */
			be_detect_dump_ue(adapter);
			return -1;
		}

		/* Sleep ~1ms between polls rather than busy-waiting */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(1));
		msecs++;
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* Step 1: write the high half of the mailbox DMA address */
	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* Step 2: write the low half of the mailbox DMA address */
	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

340
/* Read the POST-stage semaphore register (register location differs on
 * Lancer chips) and report the current POST stage via *stage.
 * Returns -1 when the register's error bit is set, else 0. */
static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
	u32 sem;

	sem = lancer_chip(adapter) ?
		ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET) :
		ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);

	*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;

	if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
		return -1;

	return 0;
}

356
/* Wait (up to ~80s, in 2s sleeps) for the adapter firmware's POST to
 * reach the ARMFW_RDY stage.
 * Returns 0 when ready, -1 on a POST error or timeout. */
int be_cmd_POST(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;

	do {
		status = be_POST_stage_get(adapter, &stage);
		if (status) {
			dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
				stage);
			return -1;
		} else if (stage != POST_STAGE_ARMFW_RDY) {
			/* Not ready yet: sleep 2s and retry */
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(2 * HZ);
			timeout += 2;
		} else {
			return 0;
		}
	} while (timeout < 40);

	dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}

/* Payload area of a WRB when the command is embedded in the WRB itself */
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

/* First scatter-gather element of a WRB whose command payload lives in a
 * separate DMA buffer (non-embedded command). */
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

/* Don't touch the hdr after it's prepared */
/* Fill in the WRB header: embedded flag or SGE count, payload length and
 * the opcode (stashed in tag0 so the completion can be matched), then
 * convert the first 8 bytes to little endian. */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
				bool embedded, u8 sge_cnt, u32 opcode)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	wrb->tag0 = opcode;
	be_dws_cpu_to_le(wrb, 8);
}

/* Don't touch the hdr after it's prepared */
/* Fill in the common command-request header: opcode, subsystem and the
 * request length (payload size excluding this header). */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
}

/* Describe the 4K pages spanned by a DMA buffer (capped at max_pages)
 * as lo/hi little-endian address pairs in the command's page list. */
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
			struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 addr = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++, addr += PAGE_SIZE_4K) {
		pages[i].lo = cpu_to_le32(addr & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(addr));
	}
}

/* Converts interrupt delay in microseconds to multiplier value */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE			651042
	const u32 round = 10;
	u32 interrupt_rate, multiplier;

	if (usec_delay == 0)
		return 0;

	interrupt_rate = 1000000 / usec_delay;
	/* Max delay, corresponding to the lowest interrupt rate */
	if (interrupt_rate == 0)
		return 1023;

	multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
	multiplier /= interrupt_rate;
	/* Round the multiplier to the closest value.*/
	multiplier = (multiplier + round / 2) / round;
	return min(multiplier, (u32)1023);
}

452
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
S
Sathya Perla 已提交
453
{
454 455 456 457 458
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
S
Sathya Perla 已提交
459 460
}

461
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
462
{
463 464 465
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

466 467 468 469 470
	if (atomic_read(&mccq->used) >= mccq->len) {
		dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
		return NULL;
	}

471 472 473 474
	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
475 476 477
	return wrb;
}

478 479 480 481 482 483 484 485
/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

486 487
	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;
488 489

	wrb = (u8 *)wrb_from_mbox(adapter);
S
Sathya Perla 已提交
490 491 492 493 494 495 496 497
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;
498 499 500

	status = be_mbox_notify_wait(adapter);

501
	mutex_unlock(&adapter->mbox_lock);
502 503 504 505 506 507 508 509 510 511 512
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

513 514 515
	if (adapter->eeh_err)
		return -EIO;

516 517
	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;
518 519 520 521 522 523 524 525 526 527 528 529 530

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

531
	mutex_unlock(&adapter->mbox_lock);
532 533
	return status;
}
534
/* Create an event queue via the mailbox; on success records the fw-assigned
 * id in eq->id and marks the queue created.
 * Returns 0 on success, -1 if the mbox lock is interrupted, else the
 * command's completion status. */
int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe*/
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

577
/* Uses mbox */
/* Query a MAC address from the fw: either the permanent (factory) MAC
 * or the one currently programmed on interface if_handle; the result is
 * copied into mac_addr (ETH_ALEN bytes) on success. */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			u8 type, bool permanent, u32 if_handle)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_MAC_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
		/* Non-permanent query is scoped to an interface id */
		req->if_id = cpu_to_le16((u16) if_handle);
		req->permanent = 0;
	}

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

615
/* Uses synchronous MCCQ */
/* Program a MAC address on interface if_id; on success the fw-assigned
 * pmac handle is returned through *pmac_id.
 * Returns 0 on success, -EBUSY when no MCCQ WRB is free, else the
 * command's completion status. */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_ADD);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

653
/* Uses synchronous MCCQ */
/* Remove the MAC identified by pmac_id from interface if_id.
 * Returns 0 on success, -EBUSY when no MCCQ WRB is free, else the
 * command's completion status. */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_DEL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

	req->hdr.domain = dom;
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

686
/* Uses Mbox */
/* Create a completion queue bound to event queue eq; the CQ context
 * layout differs between Lancer (v1 request) and BE chips.
 * On success records the fw-assigned id in cq->id. */
int be_cmd_cq_create(struct be_adapter *adapter,
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_CQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
		req->page_size = 1; /* 1 for 4K */
		AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
								coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
								no_delay);
		AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
								ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
								ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
	} else {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
								coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
								ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, solevent,
								ctxt, sol_evts);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

/* Ring lengths are encoded for the fw as log2(len)+1; the maximum
 * encoding (16) wraps to 0. */
static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */

	return (len_encoded == 16) ? 0 : len_encoded;
}

765
/* Create the MCC queue bound to completion queue cq via the mailbox,
 * subscribing it to the async events this driver handles.
 * On success records the fw-assigned id in mccq->id. */
int be_cmd_mccq_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MCC_CREATE_EXT);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		/* Lancer uses a v1 request and its own context layout */
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
								ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
								 ctxt, 1);

	} else {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	}

	/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
	req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}

825
/* Create an ethernet transmit queue bound to completion queue cq via
 * the mailbox; on success records the fw-assigned id in txq->id. */
int be_cmd_txq_create(struct be_adapter *adapter,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_tx_create *req;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_TX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
		sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
		be_encoded_q_len(txq->len));
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

873
/* Uses mbox */
/* Create an ethernet receive queue on interface if_id, bound to the CQ
 * identified by cq_id; on success records the fw-assigned queue id in
 * rxq->id and the RSS id in *rss_id. */
int be_cmd_rxq_create(struct be_adapter *adapter,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_RX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
		sizeof(*req));

	req->cq_id = cpu_to_le16(cq_id);
	/* frag_size is passed to the fw as its log2 */
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(max_frame_size);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
		*rss_id = resp->rss_id;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

916 917 918
/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	if (adapter->eeh_err)
		return -EIO;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	/* Map the queue type to the subsystem/opcode pair of its
	 * destroy command. */
	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		/* Caller bug: unknown queue type */
		BUG();
	}

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);

	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

973 974 975
/* Create an rx filtering policy configuration on an i/f
 * Uses mbox
 */
/* On success *if_handle receives the new interface id and, when a MAC
 * was supplied (pmac_invalid == false), *pmac_id receives its handle. */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
		u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_create *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_INTERFACE_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	req->pmac_invalid = pmac_invalid;
	if (!pmac_invalid)
		memcpy(req->mac_addr, mac, ETH_ALEN);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
		if (!pmac_invalid)
			*pmac_id = le32_to_cpu(resp->pmac_id);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

1015
/* Uses mbox */
/* Destroy the network interface identified by interface_id.
 * Returns 0 on success, -EIO when an EEH error was already flagged, -1 if
 * the mbox lock is interrupted, else the command's completion status. */
int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
	int status;

	if (adapter->eeh_err)
		return -EIO;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_INTERFACE_DESTROY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));

	req->hdr.domain = domain;
	req->interface_id = cpu_to_le32(interface_id);

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 *
 * @nonemb_cmd: caller-allocated DMA block holding the request; the fw
 *              writes the stats response into the same block.
 *
 * Fire-and-forget: the command is only posted here (be_mcc_notify); the
 * completion is consumed later from the MCC completion path.
 * Returns 0 when posted, -EBUSY when no free WRB is available in the MCCQ.
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_stats *req;
	struct be_sge *sge;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	/* Non-embedded: WRB carries one SGE pointing at the DMA block */
	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_ETH_GET_STATISTICS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, sizeof(*req));
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	be_mcc_notify(adapter);
	/* Flag lets the completion handler know a stats cmd is in flight */
	adapter->stats_ioctl_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

1085
/* Query the current physical link state.
 * Uses synchronous mcc.
 *
 * @link_up:    out - true only when the fw reports a non-zero mac speed
 * @mac_speed:  out - raw fw mac-speed code; written only when link is up
 * @link_speed: out - fw link speed; written only when link is up
 *
 * *link_up is pre-set to false so callers get a defined value even on
 * command failure. Returns 0 on success, -EBUSY when the MCCQ is full,
 * else the fw completion status.
 */
int be_cmd_link_status_query(struct be_adapter *adapter,
			bool *link_up, u8 *mac_speed, u16 *link_speed)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	*link_up = false;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
		if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
			*link_up = true;
			*link_speed = le16_to_cpu(resp->link_speed);
			*mac_speed = resp->mac_speed;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

1125
/* Uses Mbox */
1126
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
S
Sathya Perla 已提交
1127
{
1128 1129
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
S
Sathya Perla 已提交
1130 1131
	int status;

1132 1133
	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;
1134 1135 1136

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
1137

1138 1139
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_FW_VERSION);
S
Sathya Perla 已提交
1140 1141 1142 1143

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));

1144
	status = be_mbox_notify_wait(adapter);
S
Sathya Perla 已提交
1145 1146 1147 1148 1149
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
		strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
	}

1150
	mutex_unlock(&adapter->mbox_lock);
S
Sathya Perla 已提交
1151 1152 1153
	return status;
}

1154 1155 1156
/* set the EQ delay interval of an EQ to specified value
 * Uses async mcc
 *
 * @eq_id: id of the event queue to adjust
 * @eqd:   interrupt-delay multiplier to program
 *
 * Fire-and-forget: only posts the command; no wait for completion.
 * Returns 0 when posted, -EBUSY when no free WRB is available.
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MODIFY_EQ_DELAY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

	/* Request format supports a batch; we update exactly one EQ */
	req->num_eq = cpu_to_le32(1);
	req->delay[0].eq_id = cpu_to_le32(eq_id);
	req->delay[0].phase = 0;
	req->delay[0].delay_multiplier = cpu_to_le32(eqd);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

1190
/* Configure the VLAN filter table of an interface.
 * Uses synchronous mcc.
 *
 * @if_id:       fw interface handle
 * @vtag_array:  VLAN tags to allow; read only when !promiscuous
 * @num:         number of entries in vtag_array
 * @untagged:    also accept untagged frames
 * @promiscuous: accept all VLANs (vtag_array is then ignored)
 *
 * Returns 0 on success, -EBUSY when the MCCQ is full, else the fw
 * completion status.
 */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
			u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_VLAN_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

1229 1230 1231
/* Uses MCC for this command as it may be called in BH context
 * Uses synchronous mcc
 *
 * @port_num: physical port to configure (0 or 1)
 * @en:       enable (non-zero) or disable promiscuous mode
 *
 * Returns 0 on success, -EBUSY when the MCCQ is full, else the fw
 * completion status.
 */
int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_promiscuous_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_PROMISCUOUS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_PROMISCUOUS, sizeof(*req));

	/* In FW versions X.102.149/X.101.487 and later,
	 * the port setting associated only with the
	 * issuing pci function will take effect
	 */
	if (port_num)
		req->port1_promiscuous = en;
	else
		req->port0_promiscuous = en;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

1268
/*
 * Program the multicast filter of an interface.
 * Uses MCC for this command as it may be called in BH context
 * (netdev == NULL) => multicast promiscuous
 *
 * @if_id:  fw interface handle
 * @netdev: netdev whose mc list is programmed; NULL for mc-promiscuous
 * @mem:    caller-allocated DMA block used as the non-embedded request
 *
 * NOTE(review): req->mac[] is filled from the netdev mc list without an
 * explicit bound check here — presumably callers/the request layout
 * guarantee netdev_mc_count() fits; verify against the struct definition.
 *
 * Returns 0 on success, -EBUSY when the MCCQ is full, else the fw
 * completion status.
 */
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
		struct net_device *netdev, struct be_dma_mem *mem)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcast_mac_config *req = mem->va;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	sge = nonembedded_sgl(wrb);
	memset(req, 0, sizeof(*req));

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_NTWK_MULTICAST_SET);
	sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
	sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(mem->size);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

	req->interface_id = if_id;
	if (netdev) {
		int i;
		struct netdev_hw_addr *ha;

		req->num_mac = cpu_to_le16(netdev_mc_count(netdev));

		i = 0;
		netdev_for_each_mc_addr(ha, netdev)
			memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN);
	} else {
		req->promiscuous = 1;
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

1320
/* Set tx/rx pause-frame (flow control) settings.
 * Uses synchronous mcc.
 *
 * @tx_fc: non-zero to enable tx flow control
 * @rx_fc: non-zero to enable rx flow control
 *
 * Returns 0 on success, -EBUSY when the MCCQ is full, else the fw
 * completion status.
 */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_SET_FLOW_CONTROL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));

	/* Request fields are 16-bit little-endian */
	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

1352
/* Read the current tx/rx flow-control settings.
 * Uses sync mcc.
 *
 * @tx_fc: out - tx flow control setting; written only on success
 * @rx_fc: out - rx flow control setting; written only on success
 *
 * Returns 0 on success, -EBUSY when the MCCQ is full, else the fw
 * completion status.
 */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_FLOW_CONTROL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

1387
/* Query the firmware configuration of the adapter.
 * Uses mbox.
 *
 * @port_num: out - physical port number of this pci function
 * @mode:     out - fw function mode flags
 * @caps:     out - fw function capability flags
 *
 * Outputs are written only on success. Returns 0 on success, -1 if the
 * mbox lock wait was interrupted, else the fw completion status.
 */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
		u32 *mode, u32 *caps)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
		*port_num = le32_to_cpu(resp->phys_port);
		*mode = le32_to_cpu(resp->function_mode);
		*caps = le32_to_cpu(resp->function_caps);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
1418

1419
/* Issue a function-level reset to the adapter.
 * Uses mbox.
 *
 * The request carries no payload beyond the common header, so the header
 * struct itself is used as the embedded request.
 *
 * Returns 0 on success, -1 if the mbox lock wait was interrupted, else
 * the fw completion status.
 */
int be_cmd_reset_function(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_FUNCTION_RESET);

	be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
1443

1444 1445 1446 1447 1448 1449 1450
/* Enable RSS (TCP/IPv4 + IPv4) and program the CPU indirection table.
 * Uses mbox.
 *
 * @rsstable:   CPU indirection table to program
 * @table_size: number of entries in rsstable (must be a power of two;
 *              fw is given log2 of it)
 *
 * Returns 0 on success, -1 if the mbox lock wait was interrupted, else
 * the fw completion status.
 */
int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_rss_config *req;
	/* Fixed RSS hash key. The previous code copied an *uninitialized*
	 * stack array into req->hash, which is undefined behavior, makes
	 * RSS spreading non-deterministic across boots, and leaks kernel
	 * stack contents to the device. Use a defined key instead.
	 */
	static const u32 myhash[10] = {
		0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF,
		0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF
	};
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
		OPCODE_ETH_RSS_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_RSS_CONFIG, sizeof(*req));

	req->if_id = cpu_to_le32(adapter->if_handle);
	req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
	/* fw expects log2(table_size) */
	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
	memcpy(req->cpu_table, rsstable, table_size);
	memcpy(req->hash, myhash, sizeof(myhash));
	/* Endian conversion is done in place on the request copy only */
	be_dws_cpu_to_le(req->hash, sizeof(req->hash));

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486
/* Set the beacon LED state of a port (used for port identification).
 * Uses sync mcc.
 *
 * @port_num: physical port whose beacon is changed
 * @bcn:      beacon duration value passed to fw
 * @sts:      status duration value passed to fw
 * @state:    beacon state to set
 *
 * Returns 0 on success, -EBUSY when the MCCQ is full, else the fw
 * completion status.
 */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
			u8 bcn, u8 sts, u8 state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_enable_disable_beacon *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_ENABLE_DISABLE_BEACON);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));

	req->port_num = port_num;
	req->beacon_state = state;
	req->beacon_duration = bcn;
	req->status_duration = sts;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Read the current beacon LED state of a port.
 * Uses sync mcc.
 *
 * @port_num: physical port to query
 * @state:    out - beacon state; written only on success
 *
 * Returns 0 on success, -EBUSY when the MCCQ is full, else the fw
 * completion status.
 */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_beacon_state *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_BEACON_STATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));

	req->port_num = port_num;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_beacon_state *resp =
						embedded_payload(wrb);
		*state = resp->beacon_state;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

1547 1548 1549
/* Write a buffer to the adapter's flash ROM.
 *
 * @cmd:          DMA block holding the full non-embedded request (header,
 *                params and data)
 * @flash_type:   fw image/op type to flash
 * @flash_opcode: flash operation code
 * @buf_size:     size of the data portion being flashed
 *
 * The command is posted under mcc_lock, the lock is dropped, and this
 * function then sleeps up to 12s on adapter->flash_compl, which the MCC
 * completion handler signals after storing adapter->flash_status.
 * Returns -EBUSY when the MCCQ is full, -1 on completion timeout, else
 * the fw status saved in adapter->flash_status.
 */
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			u32 flash_type, u32 flash_opcode, u32 buf_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}
	req = cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
			OPCODE_COMMON_WRITE_FLASHROM);
	/* tag1 lets the completion handler recognize this command */
	wrb->tag1 = CMD_SUBSYSTEM_COMMON;

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	req->params.op_type = cpu_to_le32(flash_type);
	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	/* Drop the lock before sleeping on the completion */
	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->flash_compl,
			msecs_to_jiffies(12000)))
		status = -1;
	else
		status = adapter->flash_status;

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1595

1596 1597
/* Read the 4-byte CRC of the redboot image stored in flash.
 * Uses sync mcc.
 *
 * @flashed_crc: out - 4 bytes of CRC; written only on success
 * @offset:      flash offset to read the CRC from
 *
 * The request is sized sizeof(*req)+4 to leave room for the 4 data bytes
 * returned in req->params.data_buf.
 * Returns 0 on success, -EBUSY when the MCCQ is full, else the fw
 * completion status.
 */
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
			 int offset)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
			OPCODE_COMMON_READ_FLASHROM);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);

	req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
	req->params.offset = cpu_to_le32(offset);
	req->params.data_buf_size = cpu_to_le32(0x4);

	status = be_mcc_notify_wait(adapter);
	if (!status)
		memcpy(flashed_crc, req->params.data_buf, 4);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1631

1632
/* Enable wake-on-LAN via magic packet for the given MAC address.
 * Uses sync mcc with a non-embedded request.
 *
 * @mac:        MAC address the magic packet must target
 * @nonemb_cmd: caller-allocated DMA block holding the request
 *
 * Returns 0 on success, -EBUSY when the MCCQ is full, else the fw
 * completion status.
 */
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
				struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
	memcpy(req->magic_mac, mac, ETH_ALEN);

	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1667

1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702
/* Set the loopback mode of a port (for self-test).
 * Uses sync mcc.
 *
 * @port_num:      port used as both source and destination of the loop
 * @loopback_type: fw loopback mode to select
 * @enable:        non-zero to enable, zero to disable
 *
 * Returns 0 on success, -EBUSY when the MCCQ is full, else the fw
 * completion status.
 */
int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
			u8 loopback_type, u8 enable)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_lmode *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
				OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
			sizeof(*req));

	/* Loop the port back onto itself */
	req->src_port = port_num;
	req->dest_port = port_num;
	req->loopback_type = loopback_type;
	req->loopback_state = enable;

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724
/* Run a fw-driven loopback packet test on a port.
 * Uses sync mcc.
 *
 * @port_num:      port used as both source and destination
 * @loopback_type: loopback mode previously configured
 * @pkt_size:      size of each test packet
 * @num_pkts:      number of packets to send
 * @pattern:       64-bit payload pattern
 *
 * Returns -EBUSY when the MCCQ is full, the fw completion status when the
 * command itself fails, otherwise the test result code from the response.
 */
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
		u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_loopback_test *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
				OPCODE_LOWLEVEL_LOOPBACK_TEST);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
	/* The test can take a while; give the fw a longer timeout */
	req->hdr.timeout = cpu_to_le32(4);

	req->pattern = cpu_to_le64(pattern);
	req->src_port = cpu_to_le32(port_num);
	req->dest_port = cpu_to_le32(port_num);
	req->pkt_size = cpu_to_le32(pkt_size);
	req->num_pkts = cpu_to_le32(num_pkts);
	req->loopback_type = cpu_to_le32(loopback_type);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
		status = le32_to_cpu(resp->status);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* DMA a pattern to adapter DDR and verify it reads back identically.
 * Uses sync mcc with a non-embedded request.
 *
 * @pattern:  64-bit pattern replicated byte-wise into the send buffer
 * @byte_cnt: number of bytes to transfer
 * @cmd:      DMA block holding request (incl. snd_buff) and response
 *
 * Returns -EBUSY when the MCCQ is full, the fw completion status when the
 * command fails, -1 when the received buffer does not match what was sent
 * or the fw reports a send error, 0 on a clean round trip.
 */
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
				u32 byte_cnt, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_ddrdma_test *req;
	struct be_sge *sge;
	int status;
	int i, j = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = cmd->va;
	sge = nonembedded_sgl(wrb);
	be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
				OPCODE_LOWLEVEL_HOST_DDR_DMA);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);

	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	req->pattern = cpu_to_le64(pattern);
	req->byte_count = cpu_to_le32(byte_cnt);
	/* Fill snd_buff with the pattern's bytes, cycling every 8 bytes */
	for (i = 0; i < byte_cnt; i++) {
		req->snd_buff[i] = (u8)(pattern >> (j*8));
		j++;
		if (j > 7)
			j = 0;
	}

	status = be_mcc_notify_wait(adapter);

	if (!status) {
		struct be_cmd_resp_ddrdma_test *resp;
		resp = cmd->va;
		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
				resp->snd_err) {
			status = -1;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1796

1797
/* Read the adapter's SEEPROM contents into a caller-provided DMA block.
 * Uses sync mcc with a non-embedded request.
 *
 * @nonemb_cmd: DMA block for the request; fw writes the seeprom data
 *              back into the same block.
 *
 * Returns 0 on success, -EBUSY when the MCCQ is full, else the fw
 * completion status.
 */
int be_cmd_get_seeprom_data(struct be_adapter *adapter,
				struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_seeprom_read *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_SEEPROM_READ);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SEEPROM_READ, sizeof(*req));

	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865

/* Query PHY details (type, capabilities) from the firmware.
 * Uses sync mcc with a non-embedded request.
 *
 * @cmd: DMA block for the request; fw writes the phy details response
 *       into the same block for the caller to parse.
 *
 * Returns 0 on success, -EBUSY when the MCCQ is full, else the fw
 * completion status.
 */
int be_cmd_get_phy_info(struct be_adapter *adapter, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_phy_info *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
				OPCODE_COMMON_GET_PHY_DETAILS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_GET_PHY_DETAILS,
			sizeof(*req));

	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889

/* Set the NIC max-bandwidth QoS parameter for a domain (VF rate limit).
 * Uses sync mcc.
 *
 * @bps:    maximum NIC bandwidth to allow
 * @domain: fw domain (virtual function) the limit applies to
 *
 * Returns 0 on success, -EBUSY when the MCCQ is full, else the fw
 * completion status.
 */
int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_qos *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
				OPCODE_COMMON_SET_QOS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SET_QOS, sizeof(*req));

	req->hdr.domain = domain;
	/* Only the NIC-bandwidth field of the request is valid */
	req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
	req->max_bps_nic = cpu_to_le32(bps);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}