be_cmds.c 28.4 KB
Newer Older
S
Sathya Perla 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
/*
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
19
#include "be_cmds.h"
S
Sathya Perla 已提交
20

21
static void be_mcc_notify(struct be_adapter *adapter)
22
{
23
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
24 25 26 27
	u32 val = 0;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
28
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
29 30 31 32 33
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
34
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
35 36 37 38 39 40 41 42 43 44 45
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else {
		return false;
	}
}

/* Need to reset the entire word that houses the valid bit */
46
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
47 48 49 50
{
	compl->flags = 0;
}

51
static int be_mcc_compl_process(struct be_adapter *adapter,
52
	struct be_mcc_compl *compl)
53 54 55 56 57 58 59 60 61
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;
62 63 64 65 66 67 68 69 70
	if (compl_status == MCC_STATUS_SUCCESS) {
		if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
			struct be_cmd_resp_get_stats *resp =
						adapter->stats.cmd.va;
			be_dws_le_to_cpu(&resp->hw_stats,
						sizeof(resp->hw_stats));
			netdev_stats_update(adapter);
		}
	} else if (compl_status != MCC_STATUS_NOT_SUPPORTED) {
71 72
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
				CQE_STATUS_EXTD_MASK;
73 74
		dev_warn(&adapter->pdev->dev,
			"Error in cmd completion: status(compl/extd)=%d/%d\n",
75 76
			compl_status, extd_status);
	}
77
	return compl_status;
78 79
}

80
/* Link state evt is a string of bytes; no need for endian swapping */
81
static void be_async_link_state_process(struct be_adapter *adapter,
82 83
		struct be_async_event_link_state *evt)
{
84 85
	be_link_status_update(adapter,
		evt->port_link_status == ASYNC_EVENT_LINK_UP);
86 87 88 89 90 91 92 93
}

/* True when the async trailer carries the link-state event code */
static inline bool is_link_state_evt(u32 trailer)
{
	u32 evt_code = (trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
			ASYNC_TRAILER_EVENT_CODE_MASK;

	return evt_code == ASYNC_EVENT_CODE_LINK_STATE;
}
94

95
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
96
{
97
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
98
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
99 100 101 102 103 104 105 106

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

107
int be_process_mcc(struct be_adapter *adapter)
108
{
109
	struct be_mcc_compl *compl;
110
	int num = 0, status = 0;
111

112 113
	spin_lock_bh(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
114 115 116 117 118
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			BUG_ON(!is_link_state_evt(compl->flags));

			/* Interpret compl as a async link evt */
119
			be_async_link_state_process(adapter,
120
				(struct be_async_event_link_state *) compl);
121 122 123
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
				status = be_mcc_compl_process(adapter, compl);
				atomic_dec(&adapter->mcc_obj.q.used);
124 125 126 127
		}
		be_mcc_compl_use(compl);
		num++;
	}
128

129
	if (num)
130
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, num);
131

132
	spin_unlock_bh(&adapter->mcc_cq_lock);
133
	return status;
134 135
}

136
/* Wait till no more pending mcc requests are present */
137
static int be_mcc_wait_compl(struct be_adapter *adapter)
138
{
139 140
#define mcc_timeout		120000 /* 12s timeout */
	int i, status;
141
	for (i = 0; i < mcc_timeout; i++) {
142 143 144 145
		status = be_process_mcc(adapter);
		if (status)
			return status;

146
		if (atomic_read(&adapter->mcc_obj.q.used) == 0)
147 148 149
			break;
		udelay(100);
	}
150
	if (i == mcc_timeout) {
151
		dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
152 153 154
		return -1;
	}
	return 0;
155 156 157
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	/* ring the doorbell, then poll until the MCCQ drains */
	be_mcc_notify(adapter);
	return be_mcc_wait_compl(adapter);
}

164
/* Poll the mailbox doorbell until the ready bit is set.
 * Returns 0 when ready, -1 after ~4s of polling. */
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int waited_us = 0, step_us = 5;

	while (!(ioread32(db) & MPU_MAILBOX_DB_RDY_MASK)) {
		if (waited_us > 4000000) {
			dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
			return -1;
		}

		/* back off to longer delays after the first few polls */
		if (waited_us > 50)
			step_us = 200;
		waited_us += step_us;
		udelay(step_us);
	}

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
190
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
S
Sathya Perla 已提交
191
 */
192
static int be_mbox_notify_wait(struct be_adapter *adapter)
S
Sathya Perla 已提交
193 194 195
{
	int status;
	u32 val = 0;
196 197
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
S
Sathya Perla 已提交
198
	struct be_mcc_mailbox *mbox = mbox_mem->va;
199
	struct be_mcc_compl *compl = &mbox->compl;
S
Sathya Perla 已提交
200 201 202 203 204 205 206

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
207
	status = be_mbox_db_ready_wait(adapter, db);
S
Sathya Perla 已提交
208 209 210 211 212 213 214 215
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

216
	status = be_mbox_db_ready_wait(adapter, db);
S
Sathya Perla 已提交
217 218 219
	if (status != 0)
		return status;

220
	/* A cq entry has been made now */
221 222 223
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
224 225 226
		if (status)
			return status;
	} else {
227
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
S
Sathya Perla 已提交
228 229
		return -1;
	}
230
	return 0;
S
Sathya Perla 已提交
231 232
}

233
/* Read the POST stage from the EP semaphore register into *stage.
 * Returns -1 if the error bit is set, 0 otherwise. */
static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
	u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);

	*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
	return ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) &
			EP_SEMAPHORE_POST_ERR_MASK) ? -1 : 0;
}

244
int be_cmd_POST(struct be_adapter *adapter)
S
Sathya Perla 已提交
245 246 247
{
	u16 stage, error;

248
	error = be_POST_stage_get(adapter, &stage);
249 250 251 252
	if (error || stage != POST_STAGE_ARMFW_RDY) {
		dev_err(&adapter->pdev->dev, "POST failed.\n");
		return -1;
	}
S
Sathya Perla 已提交
253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326

	return 0;
}

/* Payload area of a WRB whose request is embedded in the WRB itself */
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

/* First scatter-gather element of a WRB carrying a non-embedded request */
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

/* Fill the WRB header and convert it to little endian.
 * Don't touch the hdr after it's prepared. */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
				bool embedded, u8 sge_cnt)
{
	wrb->embedded |= embedded ? MCC_WRB_EMBEDDED_MASK :
			((sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				MCC_WRB_SGE_CNT_SHIFT);
	wrb->payload_length = payload_len;
	be_dws_cpu_to_le(wrb, 20);	/* first 20 bytes hold the header */
}

/* Fill a command request header. Don't touch the hdr after it's prepared. */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->subsystem = subsystem;
	req_hdr->opcode = opcode;
	/* request_length excludes the header itself */
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
}

/* Break a DMA region into 4K page addresses stored in 'pages' (le32 pairs) */
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
			struct be_dma_mem *mem)
{
	int npages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;
	int i;

	for (i = 0; i < npages; i++, dma += PAGE_SIZE_4K) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
	}
}

/* Converts interrupt delay in microseconds to multiplier value */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE			651042
	const u32 round = 10;
	u32 multiplier = 0;	/* zero delay => zero multiplier */

	if (usec_delay) {
		u32 interrupt_rate = 1000000 / usec_delay;

		if (interrupt_rate == 0) {
			/* Max delay, corresponding to the lowest interrupt rate */
			multiplier = 1023;
		} else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			/* Round the multiplier to the closest value.*/
			multiplier = (multiplier + round/2) / round;
			multiplier = min(multiplier, (u32)1023);
		}
	}
	return multiplier;
}

327
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
S
Sathya Perla 已提交
328
{
329 330 331 332 333
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
S
Sathya Perla 已提交
334 335
}

336
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
337
{
338 339 340 341 342 343 344 345
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	BUG_ON(atomic_read(&mccq->used) >= mccq->len);
	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
346 347 348
	return wrb;
}

349
int be_cmd_eq_create(struct be_adapter *adapter,
S
Sathya Perla 已提交
350 351
		struct be_queue_info *eq, int eq_delay)
{
352 353
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
S
Sathya Perla 已提交
354 355 356
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

357
	spin_lock(&adapter->mbox_lock);
358 359 360

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
361 362 363 364 365 366 367 368 369

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
370
			be_pci_func(adapter));
S
Sathya Perla 已提交
371 372 373 374 375 376 377 378 379 380 381
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe*/
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

382
	status = be_mbox_notify_wait(adapter);
S
Sathya Perla 已提交
383
	if (!status) {
384
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
S
Sathya Perla 已提交
385 386 387
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}
388

389
	spin_unlock(&adapter->mbox_lock);
S
Sathya Perla 已提交
390 391 392
	return status;
}

393
/* Uses mbox */
394
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
S
Sathya Perla 已提交
395 396
			u8 type, bool permanent, u32 if_handle)
{
397 398
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
S
Sathya Perla 已提交
399 400
	int status;

401
	spin_lock(&adapter->mbox_lock);
402 403 404

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
405 406 407 408 409 410 411 412 413 414

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
415
		req->if_id = cpu_to_le16((u16) if_handle);
S
Sathya Perla 已提交
416 417 418
		req->permanent = 0;
	}

419 420 421
	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
S
Sathya Perla 已提交
422
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
423
	}
S
Sathya Perla 已提交
424

425
	spin_unlock(&adapter->mbox_lock);
S
Sathya Perla 已提交
426 427 428
	return status;
}

429
/* Uses synchronous MCCQ */
430
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
S
Sathya Perla 已提交
431 432
		u32 if_id, u32 *pmac_id)
{
433 434
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
S
Sathya Perla 已提交
435 436
	int status;

437 438 439 440
	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
441 442 443 444 445 446 447 448 449

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

450
	status = be_mcc_notify_wait(adapter);
S
Sathya Perla 已提交
451 452 453 454 455
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

456
	spin_unlock_bh(&adapter->mcc_lock);
S
Sathya Perla 已提交
457 458 459
	return status;
}

460
/* Uses synchronous MCCQ */
461
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
S
Sathya Perla 已提交
462
{
463 464
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
S
Sathya Perla 已提交
465 466
	int status;

467 468 469 470
	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
471 472 473 474 475 476 477 478 479

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

480 481 482
	status = be_mcc_notify_wait(adapter);

	spin_unlock_bh(&adapter->mcc_lock);
S
Sathya Perla 已提交
483 484 485 486

	return status;
}

487
/* Uses Mbox */
488
int be_cmd_cq_create(struct be_adapter *adapter,
S
Sathya Perla 已提交
489 490 491
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
492 493
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
S
Sathya Perla 已提交
494
	struct be_dma_mem *q_mem = &cq->dma_mem;
495
	void *ctxt;
S
Sathya Perla 已提交
496 497
	int status;

498
	spin_lock(&adapter->mbox_lock);
499 500 501 502

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;
S
Sathya Perla 已提交
503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
	AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
	AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
			__ilog2_u32(cq->len/256));
	AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
519
	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
520
	AMAP_SET_BITS(struct amap_cq_context, func, ctxt, be_pci_func(adapter));
S
Sathya Perla 已提交
521 522 523 524
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

525
	status = be_mbox_notify_wait(adapter);
S
Sathya Perla 已提交
526
	if (!status) {
527
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
S
Sathya Perla 已提交
528 529 530
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}
531

532
	spin_unlock(&adapter->mbox_lock);
533 534 535 536 537 538 539 540 541 542 543 544

	return status;
}

/* Encode a queue length for hw contexts; an encoded value of 16 wraps to 0 */
static u32 be_encoded_q_len(int q_len)
{
	u32 encoded = fls(q_len); /* log2(len) + 1 */

	return (encoded == 16) ? 0 : encoded;
}

545
int be_cmd_mccq_create(struct be_adapter *adapter,
546 547 548
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
549 550
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
551
	struct be_dma_mem *q_mem = &mccq->dma_mem;
552
	void *ctxt;
553 554
	int status;

555
	spin_lock(&adapter->mbox_lock);
556 557 558 559

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;
560 561 562 563 564 565 566 567

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

568
	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, be_pci_func(adapter));
569 570 571 572 573 574 575 576 577
	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

578
	status = be_mbox_notify_wait(adapter);
579 580 581 582 583
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
584
	spin_unlock(&adapter->mbox_lock);
S
Sathya Perla 已提交
585 586 587 588

	return status;
}

589
int be_cmd_txq_create(struct be_adapter *adapter,
S
Sathya Perla 已提交
590 591 592
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
593 594
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_tx_create *req;
S
Sathya Perla 已提交
595
	struct be_dma_mem *q_mem = &txq->dma_mem;
596
	void *ctxt;
S
Sathya Perla 已提交
597 598
	int status;

599
	spin_lock(&adapter->mbox_lock);
600 601 602 603

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;
S
Sathya Perla 已提交
604 605 606 607 608 609 610 611 612 613

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
		sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

614 615
	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
		be_encoded_q_len(txq->len));
S
Sathya Perla 已提交
616
	AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt,
617
			be_pci_func(adapter));
S
Sathya Perla 已提交
618 619 620 621 622 623 624
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

625
	status = be_mbox_notify_wait(adapter);
S
Sathya Perla 已提交
626 627 628 629 630
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}
631

632
	spin_unlock(&adapter->mbox_lock);
S
Sathya Perla 已提交
633 634 635 636

	return status;
}

637
/* Uses mbox */
638
int be_cmd_rxq_create(struct be_adapter *adapter,
S
Sathya Perla 已提交
639 640 641
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u16 max_frame_size, u32 if_id, u32 rss)
{
642 643
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
S
Sathya Perla 已提交
644 645 646
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

647
	spin_lock(&adapter->mbox_lock);
648 649 650

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
651 652 653 654 655 656 657 658 659 660 661 662 663 664

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
		sizeof(*req));

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(max_frame_size);
	req->rss_queue = cpu_to_le32(rss);

665
	status = be_mbox_notify_wait(adapter);
S
Sathya Perla 已提交
666 667 668 669 670
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
	}
671

672
	spin_unlock(&adapter->mbox_lock);
S
Sathya Perla 已提交
673 674 675 676

	return status;
}

677 678 679
/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
680
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
S
Sathya Perla 已提交
681 682
		int queue_type)
{
683 684
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
S
Sathya Perla 已提交
685 686 687
	u8 subsys = 0, opcode = 0;
	int status;

688
	spin_lock(&adapter->mbox_lock);
S
Sathya Perla 已提交
689

690 691 692
	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

S
Sathya Perla 已提交
693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
712 713 714 715
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
S
Sathya Perla 已提交
716
	default:
717
		BUG();
S
Sathya Perla 已提交
718 719 720 721
	}
	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	req->id = cpu_to_le16(q->id);

722
	status = be_mbox_notify_wait(adapter);
723

724
	spin_unlock(&adapter->mbox_lock);
S
Sathya Perla 已提交
725 726 727 728

	return status;
}

729 730 731
/* Create an rx filtering policy configuration on an i/f
 * Uses mbox
 */
732
int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
S
Sathya Perla 已提交
733 734
		bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
{
735 736
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_create *req;
S
Sathya Perla 已提交
737 738
	int status;

739
	spin_lock(&adapter->mbox_lock);
740 741 742

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
743 744 745 746 747 748 749 750

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

	req->capability_flags = cpu_to_le32(flags);
	req->enable_flags = cpu_to_le32(flags);
751
	req->pmac_invalid = pmac_invalid;
S
Sathya Perla 已提交
752 753 754
	if (!pmac_invalid)
		memcpy(req->mac_addr, mac, ETH_ALEN);

755
	status = be_mbox_notify_wait(adapter);
S
Sathya Perla 已提交
756 757 758 759 760 761 762
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
		if (!pmac_invalid)
			*pmac_id = le32_to_cpu(resp->pmac_id);
	}

763
	spin_unlock(&adapter->mbox_lock);
S
Sathya Perla 已提交
764 765 766
	return status;
}

767
/* Uses mbox */
768
int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
S
Sathya Perla 已提交
769
{
770 771
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
S
Sathya Perla 已提交
772 773
	int status;

774
	spin_lock(&adapter->mbox_lock);
775 776 777

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
778 779 780 781 782 783 784

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));

	req->interface_id = cpu_to_le32(interface_id);
785 786

	status = be_mbox_notify_wait(adapter);
S
Sathya Perla 已提交
787

788
	spin_unlock(&adapter->mbox_lock);
S
Sathya Perla 已提交
789 790 791 792 793 794

	return status;
}

/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
795
 * Uses asynchronous MCC
S
Sathya Perla 已提交
796
 */
797
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
S
Sathya Perla 已提交
798
{
799 800 801
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_stats *req;
	struct be_sge *sge;
S
Sathya Perla 已提交
802

803
	spin_lock_bh(&adapter->mcc_lock);
S
Sathya Perla 已提交
804

805 806 807
	wrb = wrb_from_mccq(adapter);
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);
S
Sathya Perla 已提交
808 809

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
810
	wrb->tag0 = OPCODE_ETH_GET_STATISTICS;
S
Sathya Perla 已提交
811 812 813 814 815 816 817

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, sizeof(*req));
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

818
	be_mcc_notify(adapter);
S
Sathya Perla 已提交
819

820 821
	spin_unlock_bh(&adapter->mcc_lock);
	return 0;
S
Sathya Perla 已提交
822 823
}

824
/* Uses synchronous mcc */
825
int be_cmd_link_status_query(struct be_adapter *adapter,
826
			bool *link_up)
S
Sathya Perla 已提交
827
{
828 829
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
S
Sathya Perla 已提交
830 831
	int status;

832 833 834 835
	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	req = embedded_payload(wrb);
836 837

	*link_up = false;
S
Sathya Perla 已提交
838 839 840 841 842 843

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

844
	status = be_mcc_notify_wait(adapter);
S
Sathya Perla 已提交
845 846
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
847 848
		if (resp->mac_speed != PHY_LINK_SPEED_ZERO)
			*link_up = true;
S
Sathya Perla 已提交
849 850
	}

851
	spin_unlock_bh(&adapter->mcc_lock);
S
Sathya Perla 已提交
852 853 854
	return status;
}

855
/* Uses Mbox */
856
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
S
Sathya Perla 已提交
857
{
858 859
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
S
Sathya Perla 已提交
860 861
	int status;

862
	spin_lock(&adapter->mbox_lock);
863 864 865

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
866 867 868 869 870 871

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));

872
	status = be_mbox_notify_wait(adapter);
S
Sathya Perla 已提交
873 874 875 876 877
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
		strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
	}

878
	spin_unlock(&adapter->mbox_lock);
S
Sathya Perla 已提交
879 880 881
	return status;
}

882 883 884
/* set the EQ delay interval of an EQ to specified value
 * Uses async mcc
 */
885
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
S
Sathya Perla 已提交
886
{
887 888
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
S
Sathya Perla 已提交
889

890 891 892 893
	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
894 895 896 897 898 899 900 901 902 903 904

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

	req->num_eq = cpu_to_le32(1);
	req->delay[0].eq_id = cpu_to_le32(eq_id);
	req->delay[0].phase = 0;
	req->delay[0].delay_multiplier = cpu_to_le32(eqd);

905
	be_mcc_notify(adapter);
S
Sathya Perla 已提交
906

907 908
	spin_unlock_bh(&adapter->mcc_lock);
	return 0;
S
Sathya Perla 已提交
909 910
}

911
/* Uses sycnhronous mcc */
912
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
S
Sathya Perla 已提交
913 914
			u32 num, bool untagged, bool promiscuous)
{
915 916
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
S
Sathya Perla 已提交
917 918
	int status;

919 920 921 922
	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
923 924 925 926 927 928 929 930 931 932 933 934 935 936 937

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

938
	status = be_mcc_notify_wait(adapter);
S
Sathya Perla 已提交
939

940
	spin_unlock_bh(&adapter->mcc_lock);
S
Sathya Perla 已提交
941 942 943
	return status;
}

944 945 946
/* Uses MCC for this command as it may be called in BH context
 * Uses synchronous mcc
 */
947
int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
S
Sathya Perla 已提交
948
{
949 950
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_promiscuous_config *req;
951
	int status;
S
Sathya Perla 已提交
952

953
	spin_lock_bh(&adapter->mcc_lock);
954

955
	wrb = wrb_from_mccq(adapter);
956
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
957 958 959 960 961 962 963 964 965 966 967

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_PROMISCUOUS, sizeof(*req));

	if (port_num)
		req->port1_promiscuous = en;
	else
		req->port0_promiscuous = en;

968
	status = be_mcc_notify_wait(adapter);
S
Sathya Perla 已提交
969

970
	spin_unlock_bh(&adapter->mcc_lock);
971
	return status;
S
Sathya Perla 已提交
972 973
}

974
/*
975
 * Uses MCC for this command as it may be called in BH context
976 977
 * (mc == NULL) => multicast promiscous
 */
978
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
979
		struct dev_mc_list *mc_list, u32 mc_count)
S
Sathya Perla 已提交
980
{
981 982 983
#define BE_MAX_MC		32 /* set mcast promisc if > 32 */
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcast_mac_config *req;
S
Sathya Perla 已提交
984

985
	spin_lock_bh(&adapter->mcc_lock);
986

987
	wrb = wrb_from_mccq(adapter);
988
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
989 990 991 992 993 994 995

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

	req->interface_id = if_id;
996 997 998 999 1000 1001 1002 1003 1004 1005
	if (mc_list && mc_count <= BE_MAX_MC) {
		int i;
		struct dev_mc_list *mc;

		req->num_mac = cpu_to_le16(mc_count);

		for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
			memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
	} else {
		req->promiscuous = 1;
S
Sathya Perla 已提交
1006 1007
	}

1008
	be_mcc_notify_wait(adapter);
S
Sathya Perla 已提交
1009

1010
	spin_unlock_bh(&adapter->mcc_lock);
1011 1012

	return 0;
S
Sathya Perla 已提交
1013 1014
}

1015
/* Uses synchrounous mcc */
1016
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
S
Sathya Perla 已提交
1017
{
1018 1019
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;
S
Sathya Perla 已提交
1020 1021
	int status;

1022
	spin_lock_bh(&adapter->mcc_lock);
S
Sathya Perla 已提交
1023

1024 1025
	wrb = wrb_from_mccq(adapter);
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
1026 1027 1028 1029 1030 1031 1032 1033 1034

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));

	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

1035
	status = be_mcc_notify_wait(adapter);
S
Sathya Perla 已提交
1036

1037
	spin_unlock_bh(&adapter->mcc_lock);
S
Sathya Perla 已提交
1038 1039 1040
	return status;
}

1041
/* Uses sycn mcc */
1042
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
S
Sathya Perla 已提交
1043
{
1044 1045
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
S
Sathya Perla 已提交
1046 1047
	int status;

1048
	spin_lock_bh(&adapter->mcc_lock);
S
Sathya Perla 已提交
1049

1050 1051
	wrb = wrb_from_mccq(adapter);
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
1052 1053 1054 1055 1056 1057

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

1058
	status = be_mcc_notify_wait(adapter);
S
Sathya Perla 已提交
1059 1060 1061 1062 1063 1064 1065
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

1066
	spin_unlock_bh(&adapter->mcc_lock);
S
Sathya Perla 已提交
1067 1068 1069
	return status;
}

1070
/* Uses mbox */
1071
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num)
S
Sathya Perla 已提交
1072
{
1073 1074
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
S
Sathya Perla 已提交
1075 1076
	int status;

1077
	spin_lock(&adapter->mbox_lock);
S
Sathya Perla 已提交
1078

1079 1080
	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
S
Sathya Perla 已提交
1081 1082 1083 1084 1085 1086

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

1087
	status = be_mbox_notify_wait(adapter);
S
Sathya Perla 已提交
1088 1089 1090 1091 1092
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
		*port_num = le32_to_cpu(resp->phys_port);
	}

1093
	spin_unlock(&adapter->mbox_lock);
S
Sathya Perla 已提交
1094 1095
	return status;
}
1096

1097
/* Uses mbox */
1098 1099
int be_cmd_reset_function(struct be_adapter *adapter)
{
1100 1101
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;
1102 1103 1104 1105
	int status;

	spin_lock(&adapter->mbox_lock);

1106 1107
	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
1108 1109 1110 1111 1112 1113

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));

1114
	status = be_mbox_notify_wait(adapter);
1115 1116 1117 1118

	spin_unlock(&adapter->mbox_lock);
	return status;
}
1119 1120 1121 1122

/*
 * Write a firmware image chunk to the adapter's flash ROM.
 * @cmd: DMA-coherent buffer holding the command header, flash params and data.
 * Uses MCC; may be called in BH context.
 *
 * This is a non-embedded command: the WRB carries a single SGE pointing at
 * cmd->va/cmd->dma, so the header and parameters must be written into that
 * DMA buffer — NOT into the WRB's embedded payload. The previous code
 * clobbered req with embedded_payload(wrb) after initializing it to cmd->va,
 * which left the DMA buffer the firmware actually reads unwritten.
 */
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			u32 flash_type, u32 flash_opcode, u32 buf_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req = cmd->va;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	sge = nonembedded_sgl(wrb);

	/* Non-embedded: payload is the external DMA buffer (1 SGE) */
	be_wrb_hdr_prepare(wrb, cmd->size, false, 1);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	req->params.op_type = cpu_to_le32(flash_type);
	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	status = be_mcc_notify_wait(adapter);

	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}