/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2008-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/delay.h>
#include <asm/cmpxchg.h>
#include "net_driver.h"
#include "nic.h"
#include "io.h"
#include "farch_regs.h"
#include "mcdi_pcol.h"
#include "phy.h"

/**************************************************************************
 *
 * Management-Controller-to-Driver Interface
 *
 **************************************************************************
 */

#define MCDI_RPC_TIMEOUT       (10 * HZ)

/* A reboot/assertion causes the MCDI status word to be set after the
 * command word is set or a REBOOT event is sent. If we notice a reboot
 * via these mechanisms then wait 250ms for the status word to be set.
 */
#define MCDI_STATUS_DELAY_US		100
#define MCDI_STATUS_DELAY_COUNT		2500
#define MCDI_STATUS_SLEEP_MS						\
	(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)

#define SEQ_MASK							\
	EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))

struct efx_mcdi_async_param {
	struct list_head list;
	unsigned int cmd;
	size_t inlen;
	size_t outlen;
	bool quiet;
	efx_mcdi_async_completer *complete;
	unsigned long cookie;
	/* followed by request/response buffer */
};

static void efx_mcdi_timeout_async(unsigned long context);
static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
			       bool *was_attached_out);
static bool efx_mcdi_poll_once(struct efx_nic *efx);
static void efx_mcdi_abandon(struct efx_nic *efx);

int efx_mcdi_init(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;
	bool already_attached;
	int rc;

	efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
	if (!efx->mcdi)
		return -ENOMEM;

	mcdi = efx_mcdi(efx);
	mcdi->efx = efx;
	init_waitqueue_head(&mcdi->wq);
	spin_lock_init(&mcdi->iface_lock);
	mcdi->state = MCDI_STATE_QUIESCENT;
	mcdi->mode = MCDI_MODE_POLL;
	spin_lock_init(&mcdi->async_lock);
	INIT_LIST_HEAD(&mcdi->async_list);
	setup_timer(&mcdi->async_timer, efx_mcdi_timeout_async,
		    (unsigned long)mcdi);

	(void) efx_mcdi_poll_reboot(efx);
	mcdi->new_epoch = true;

	/* Recover from a failed assertion before probing */
	rc = efx_mcdi_handle_assertion(efx);
	if (rc)
		return rc;

	/* Let the MC (and BMC, if this is a LOM) know that the driver
	 * is loaded. We should do this before we reset the NIC.
	 */
	rc = efx_mcdi_drv_attach(efx, true, &already_attached);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "Unable to register driver with MCPU\n");
		return rc;
	}
	if (already_attached)
		/* Not a fatal error */
		netif_err(efx, probe, efx->net_dev,
			  "Host already registered with MCPU\n");

	if (efx->mcdi->fn_flags &
	    (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
		efx->primary = efx;

	return 0;
}

void efx_mcdi_fini(struct efx_nic *efx)
{
	if (!efx->mcdi)
		return;

	BUG_ON(efx->mcdi->iface.state != MCDI_STATE_QUIESCENT);

	/* Relinquish the device (back to the BMC, if this is a LOM) */
	efx_mcdi_drv_attach(efx, false, NULL);

	kfree(efx->mcdi);
}

static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
				  const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	efx_dword_t hdr[2];
	size_t hdr_len;
	u32 xflags, seqno;

	BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT);

	/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
	spin_lock_bh(&mcdi->iface_lock);
	++mcdi->seqno;
	spin_unlock_bh(&mcdi->iface_lock);

	seqno = mcdi->seqno & SEQ_MASK;
	xflags = 0;
	if (mcdi->mode == MCDI_MODE_EVENTS)
		xflags |= MCDI_HEADER_XFLAGS_EVREQ;

	if (efx->type->mcdi_max_ver == 1) {
		/* MCDI v1 */
		EFX_POPULATE_DWORD_7(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, cmd,
				     MCDI_HEADER_DATALEN, inlen,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags,
				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
		hdr_len = 4;
	} else {
		/* MCDI v2 */
		BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
		EFX_POPULATE_DWORD_7(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
				     MCDI_HEADER_DATALEN, 0,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags,
				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
		EFX_POPULATE_DWORD_2(hdr[1],
				     MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
				     MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
		hdr_len = 8;
	}

	efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);

	mcdi->new_epoch = false;
}

static int efx_mcdi_errno(unsigned int mcdi_err)
{
	switch (mcdi_err) {
	case 0:
		return 0;
#define TRANSLATE_ERROR(name)					\
	case MC_CMD_ERR_ ## name:				\
		return -name;
	TRANSLATE_ERROR(EPERM);
	TRANSLATE_ERROR(ENOENT);
	TRANSLATE_ERROR(EINTR);
	TRANSLATE_ERROR(EAGAIN);
	TRANSLATE_ERROR(EACCES);
	TRANSLATE_ERROR(EBUSY);
	TRANSLATE_ERROR(EINVAL);
	TRANSLATE_ERROR(EDEADLK);
	TRANSLATE_ERROR(ENOSYS);
	TRANSLATE_ERROR(ETIME);
	TRANSLATE_ERROR(EALREADY);
	TRANSLATE_ERROR(ENOSPC);
#undef TRANSLATE_ERROR
	case MC_CMD_ERR_ENOTSUP:
		return -EOPNOTSUPP;
	case MC_CMD_ERR_ALLOC_FAIL:
		return -ENOBUFS;
	case MC_CMD_ERR_MAC_EXIST:
		return -EADDRINUSE;
	default:
		return -EPROTO;
	}
}

static void efx_mcdi_read_response_header(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned int respseq, respcmd, error;
	efx_dword_t hdr;

	efx->type->mcdi_read_response(efx, &hdr, 0, 4);
	respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
	respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
	error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);

	if (respcmd != MC_CMD_V2_EXTN) {
		mcdi->resp_hdr_len = 4;
		mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
	} else {
		efx->type->mcdi_read_response(efx, &hdr, 4, 4);
		mcdi->resp_hdr_len = 8;
		mcdi->resp_data_len =
			EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
	}

	if (error && mcdi->resp_data_len == 0) {
		netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
		mcdi->resprc = -EIO;
	} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
		netif_err(efx, hw, efx->net_dev,
			  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
			  respseq, mcdi->seqno);
		mcdi->resprc = -EIO;
	} else if (error) {
		efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
		mcdi->resprc =
			efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0));
	} else {
		mcdi->resprc = 0;
	}
}

static bool efx_mcdi_poll_once(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	rmb();
	if (!efx->type->mcdi_poll_response(efx))
		return false;

	spin_lock_bh(&mcdi->iface_lock);
	efx_mcdi_read_response_header(efx);
	spin_unlock_bh(&mcdi->iface_lock);

	return true;
}

static int efx_mcdi_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned long time, finish;
	unsigned int spins;
	int rc;

	/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
	rc = efx_mcdi_poll_reboot(efx);
	if (rc) {
		spin_lock_bh(&mcdi->iface_lock);
		mcdi->resprc = rc;
		mcdi->resp_hdr_len = 0;
		mcdi->resp_data_len = 0;
		spin_unlock_bh(&mcdi->iface_lock);
		return 0;
	}

	/* Poll for completion. Poll quickly (once a us) for the 1st jiffy,
	 * because generally mcdi responses are fast. After that, back off
	 * and poll once a jiffy (approximately)
	 */
	spins = TICK_USEC;
	finish = jiffies + MCDI_RPC_TIMEOUT;

	while (1) {
		if (spins != 0) {
			--spins;
			udelay(1);
		} else {
			schedule_timeout_uninterruptible(1);
		}

		time = jiffies;

		if (efx_mcdi_poll_once(efx))
			break;

		if (time_after(time, finish))
			return -ETIMEDOUT;
	}

	/* Return rc=0 like wait_event_timeout() */
	return 0;
}

/* Test and clear MC-rebooted flag for this port/function; reset
 * software state as necessary.
 */
int efx_mcdi_poll_reboot(struct efx_nic *efx)
{
	if (!efx->mcdi)
		return 0;

	return efx->type->mcdi_poll_reboot(efx);
}

static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi)
{
	return cmpxchg(&mcdi->state,
		       MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) ==
		MCDI_STATE_QUIESCENT;
}

static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi)
{
	/* Wait until the interface becomes QUIESCENT and we win the race
	 * to mark it RUNNING_SYNC.
	 */
	wait_event(mcdi->wq,
		   cmpxchg(&mcdi->state,
			   MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) ==
		   MCDI_STATE_QUIESCENT);
}

static int efx_mcdi_await_completion(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED,
			       MCDI_RPC_TIMEOUT) == 0)
		return -ETIMEDOUT;

	/* Check if efx_mcdi_set_mode() switched us back to polled completions.
	 * In which case, poll for completions directly. If efx_mcdi_ev_cpl()
	 * completed the request first, then we'll just end up completing the
	 * request again, which is safe.
	 *
	 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which
	 * wait_event_timeout() implicitly provides.
	 */
	if (mcdi->mode == MCDI_MODE_POLL)
		return efx_mcdi_poll(efx);

	return 0;
}

/* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the
 * requester.  Return whether this was done.  Does not take any locks.
 */
static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi)
{
	if (cmpxchg(&mcdi->state,
		    MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) ==
	    MCDI_STATE_RUNNING_SYNC) {
		wake_up(&mcdi->wq);
		return true;
	}

	return false;
}

static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
{
	if (mcdi->mode == MCDI_MODE_EVENTS) {
		struct efx_mcdi_async_param *async;
		struct efx_nic *efx = mcdi->efx;

		/* Process the asynchronous request queue */
		spin_lock_bh(&mcdi->async_lock);
		async = list_first_entry_or_null(
			&mcdi->async_list, struct efx_mcdi_async_param, list);
		if (async) {
			mcdi->state = MCDI_STATE_RUNNING_ASYNC;
			efx_mcdi_send_request(efx, async->cmd,
					      (const efx_dword_t *)(async + 1),
					      async->inlen);
			mod_timer(&mcdi->async_timer,
				  jiffies + MCDI_RPC_TIMEOUT);
		}
		spin_unlock_bh(&mcdi->async_lock);

		if (async)
			return;
	}

	mcdi->state = MCDI_STATE_QUIESCENT;
	wake_up(&mcdi->wq);
}

/* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the
 * asynchronous completion function, and release the interface.
 * Return whether this was done.  Must be called in bh-disabled
 * context.  Will take iface_lock and async_lock.
 */
static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
{
	struct efx_nic *efx = mcdi->efx;
	struct efx_mcdi_async_param *async;
	size_t hdr_len, data_len, err_len;
	efx_dword_t *outbuf;
	MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
	int rc;

	if (cmpxchg(&mcdi->state,
		    MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) !=
	    MCDI_STATE_RUNNING_ASYNC)
		return false;

	spin_lock(&mcdi->iface_lock);
	if (timeout) {
		/* Ensure that if the completion event arrives later,
		 * the seqno check in efx_mcdi_ev_cpl() will fail
		 */
		++mcdi->seqno;
		++mcdi->credits;
		rc = -ETIMEDOUT;
		hdr_len = 0;
		data_len = 0;
	} else {
		rc = mcdi->resprc;
		hdr_len = mcdi->resp_hdr_len;
		data_len = mcdi->resp_data_len;
	}
	spin_unlock(&mcdi->iface_lock);

	/* Stop the timer.  In case the timer function is running, we
	 * must wait for it to return so that there is no possibility
	 * of it aborting the next request.
	 */
	if (!timeout)
		del_timer_sync(&mcdi->async_timer);

	spin_lock(&mcdi->async_lock);
	async = list_first_entry(&mcdi->async_list,
				 struct efx_mcdi_async_param, list);
	list_del(&async->list);
	spin_unlock(&mcdi->async_lock);

	outbuf = (efx_dword_t *)(async + 1);
	efx->type->mcdi_read_response(efx, outbuf, hdr_len,
				      min(async->outlen, data_len));
	if (!timeout && rc && !async->quiet) {
		err_len = min(sizeof(errbuf), data_len);
		efx->type->mcdi_read_response(efx, errbuf, hdr_len,
					      sizeof(errbuf));
		efx_mcdi_display_error(efx, async->cmd, async->inlen, errbuf,
				       err_len, rc);
	}
	async->complete(efx, async->cookie, rc, outbuf, data_len);
	kfree(async);

	efx_mcdi_release(mcdi);

	return true;
}

static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
			    unsigned int datalen, unsigned int mcdi_err)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	bool wake = false;

	spin_lock(&mcdi->iface_lock);

	if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
		if (mcdi->credits)
			/* The request has been cancelled */
			--mcdi->credits;
		else
			netif_err(efx, hw, efx->net_dev,
				  "MC response mismatch tx seq 0x%x rx "
				  "seq 0x%x\n", seqno, mcdi->seqno);
	} else {
		if (efx->type->mcdi_max_ver >= 2) {
			/* MCDI v2 responses don't fit in an event */
			efx_mcdi_read_response_header(efx);
		} else {
			mcdi->resprc = efx_mcdi_errno(mcdi_err);
			mcdi->resp_hdr_len = 4;
			mcdi->resp_data_len = datalen;
		}

		wake = true;
	}

	spin_unlock(&mcdi->iface_lock);

	if (wake) {
		if (!efx_mcdi_complete_async(mcdi, false))
			(void) efx_mcdi_complete_sync(mcdi);

		/* If the interface isn't RUNNING_ASYNC or
		 * RUNNING_SYNC then we've received a duplicate
		 * completion after we've already transitioned back to
		 * QUIESCENT. [A subsequent invocation would increment
		 * seqno, so would have failed the seqno check].
		 */
	}
}

static void efx_mcdi_timeout_async(unsigned long context)
{
	struct efx_mcdi_iface *mcdi = (struct efx_mcdi_iface *)context;

	efx_mcdi_complete_async(mcdi, true);
}

static int
efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
{
	if (efx->type->mcdi_max_ver < 0 ||
	     (efx->type->mcdi_max_ver < 2 &&
	      cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
		return -EINVAL;

	if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
	    (efx->type->mcdi_max_ver < 2 &&
	     inlen > MCDI_CTL_SDU_LEN_MAX_V1))
		return -EMSGSIZE;

	return 0;
}

static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
				efx_dword_t *outbuf, size_t outlen,
				size_t *outlen_actual, bool quiet)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
	int rc;

	if (mcdi->mode == MCDI_MODE_POLL)
		rc = efx_mcdi_poll(efx);
	else
		rc = efx_mcdi_await_completion(efx);

	if (rc != 0) {
		netif_err(efx, hw, efx->net_dev,
			  "MC command 0x%x inlen %d mode %d timed out\n",
			  cmd, (int)inlen, mcdi->mode);

		if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
			netif_err(efx, hw, efx->net_dev,
				  "MCDI request was completed without an event\n");
			rc = 0;
		}

		efx_mcdi_abandon(efx);

		/* Close the race with efx_mcdi_ev_cpl() executing just too late
		 * and completing a request we've just cancelled, by ensuring
		 * that the seqno check therein fails.
		 */
		spin_lock_bh(&mcdi->iface_lock);
		++mcdi->seqno;
		++mcdi->credits;
		spin_unlock_bh(&mcdi->iface_lock);
	}

	if (rc != 0) {
		if (outlen_actual)
			*outlen_actual = 0;
	} else {
		size_t hdr_len, data_len, err_len;

		/* At the very least we need a memory barrier here to ensure
		 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
		 * a spurious efx_mcdi_ev_cpl() running concurrently by
		 * acquiring the iface_lock. */
		spin_lock_bh(&mcdi->iface_lock);
		rc = mcdi->resprc;
		hdr_len = mcdi->resp_hdr_len;
		data_len = mcdi->resp_data_len;
		err_len = min(sizeof(errbuf), data_len);
		spin_unlock_bh(&mcdi->iface_lock);

		BUG_ON(rc > 0);

		efx->type->mcdi_read_response(efx, outbuf, hdr_len,
					      min(outlen, data_len));
		if (outlen_actual)
			*outlen_actual = data_len;

		efx->type->mcdi_read_response(efx, errbuf, hdr_len, err_len);

		if (cmd == MC_CMD_REBOOT && rc == -EIO) {
			/* Don't reset if MC_CMD_REBOOT returns EIO */
		} else if (rc == -EIO || rc == -EINTR) {
			netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
				  -rc);
			efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
		} else if (rc && !quiet) {
			efx_mcdi_display_error(efx, cmd, inlen, errbuf, err_len,
					       rc);
		}

		if (rc == -EIO || rc == -EINTR) {
			msleep(MCDI_STATUS_SLEEP_MS);
			efx_mcdi_poll_reboot(efx);
			mcdi->new_epoch = true;
		}
	}

	efx_mcdi_release(mcdi);
	return rc;
}

static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
			 const efx_dword_t *inbuf, size_t inlen,
			 efx_dword_t *outbuf, size_t outlen,
			 size_t *outlen_actual, bool quiet)
{
	int rc;

	rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
	if (rc) {
		if (outlen_actual)
			*outlen_actual = 0;
		return rc;
	}
	return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
				    outlen_actual, quiet);
}

int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
		 const efx_dword_t *inbuf, size_t inlen,
		 efx_dword_t *outbuf, size_t outlen,
		 size_t *outlen_actual)
{
	return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen,
			     outlen_actual, false);
}

/* Normally, on receiving an error code in the MCDI response,
 * efx_mcdi_rpc will log an error message containing (among other
 * things) the raw error code, by means of efx_mcdi_display_error.
 * This _quiet version suppresses that; if the caller wishes to log
 * the error conditionally on the return code, it should call this
 * function and is then responsible for calling efx_mcdi_display_error
 * as needed.
 */
int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd,
		       const efx_dword_t *inbuf, size_t inlen,
		       efx_dword_t *outbuf, size_t outlen,
		       size_t *outlen_actual)
{
	return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen,
			     outlen_actual, true);
}

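/* Illustrative sketch (not part of the driver): a caller that expects a
 * particular failure can use the _quiet variant and report any other
 * error itself.  MC_CMD_FOO and its output length are hypothetical and
 * exist only for this example; efx_mcdi_read_assertion() below shows the
 * same pattern with a real command.
 *
 *	MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, MC_CMD_FOO_OUT_LEN);
 *	size_t outlen;
 *	int rc;
 *
 *	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FOO, NULL, 0,
 *				outbuf, sizeof(outbuf), &outlen);
 *	if (rc == -ENOENT)
 *		return 0;
 *	if (rc)
 *		efx_mcdi_display_error(efx, MC_CMD_FOO, 0,
 *				       outbuf, outlen, rc);
 */
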
int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
		       const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	int rc;

	rc = efx_mcdi_check_supported(efx, cmd, inlen);
	if (rc)
		return rc;

	if (efx->mc_bist_for_other_fn)
		return -ENETDOWN;

	if (mcdi->mode == MCDI_MODE_FAIL)
		return -ENETDOWN;

	efx_mcdi_acquire_sync(mcdi);
	efx_mcdi_send_request(efx, cmd, inbuf, inlen);
	return 0;
}

static int _efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
			       const efx_dword_t *inbuf, size_t inlen,
			       size_t outlen,
			       efx_mcdi_async_completer *complete,
			       unsigned long cookie, bool quiet)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	struct efx_mcdi_async_param *async;
	int rc;

	rc = efx_mcdi_check_supported(efx, cmd, inlen);
	if (rc)
		return rc;

	if (efx->mc_bist_for_other_fn)
		return -ENETDOWN;

	async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
			GFP_ATOMIC);
	if (!async)
		return -ENOMEM;

	async->cmd = cmd;
	async->inlen = inlen;
	async->outlen = outlen;
	async->quiet = quiet;
	async->complete = complete;
	async->cookie = cookie;
	memcpy(async + 1, inbuf, inlen);

	spin_lock_bh(&mcdi->async_lock);

	if (mcdi->mode == MCDI_MODE_EVENTS) {
		list_add_tail(&async->list, &mcdi->async_list);

		/* If this is at the front of the queue, try to start it
		 * immediately
		 */
		if (mcdi->async_list.next == &async->list &&
		    efx_mcdi_acquire_async(mcdi)) {
			efx_mcdi_send_request(efx, cmd, inbuf, inlen);
			mod_timer(&mcdi->async_timer,
				  jiffies + MCDI_RPC_TIMEOUT);
		}
	} else {
		kfree(async);
		rc = -ENETDOWN;
	}

	spin_unlock_bh(&mcdi->async_lock);

	return rc;
}

/**
 * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
 * @efx: NIC through which to issue the command
 * @cmd: Command type number
 * @inbuf: Command parameters
 * @inlen: Length of command parameters, in bytes
 * @outlen: Length to allocate for response buffer, in bytes
 * @complete: Function to be called on completion or cancellation.
 * @cookie: Arbitrary value to be passed to @complete.
 *
 * This function does not sleep and therefore may be called in atomic
 * context.  It will fail if event queues are disabled or if MCDI
 * event completions have been disabled due to an error.
 *
 * If it succeeds, the @complete function will be called exactly once
 * in atomic context, when one of the following occurs:
 * (a) the completion event is received (in NAPI context)
 * (b) event queues are disabled (in the process that disables them)
 * (c) the request times-out (in timer context)
 */
int
efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
		   const efx_dword_t *inbuf, size_t inlen, size_t outlen,
		   efx_mcdi_async_completer *complete, unsigned long cookie)
{
	return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
				   cookie, false);
}

int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
			     const efx_dword_t *inbuf, size_t inlen,
			     size_t outlen, efx_mcdi_async_completer *complete,
			     unsigned long cookie)
{
	return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
				   cookie, true);
}

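/* Illustrative sketch (not part of the driver): issuing a command from
 * atomic context with a completion callback.  MC_CMD_FOO, foo_complete()
 * and the lengths used here are hypothetical and exist only for this
 * example.
 *
 *	static void foo_complete(struct efx_nic *efx, unsigned long cookie,
 *				 int rc, efx_dword_t *outbuf,
 *				 size_t outlen_actual)
 *	{
 *		if (rc)
 *			netif_err(efx, hw, efx->net_dev,
 *				  "FOO failed: rc=%d\n", rc);
 *	}
 *
 *	rc = efx_mcdi_rpc_async(efx, MC_CMD_FOO, inbuf, sizeof(inbuf),
 *				MC_CMD_FOO_OUT_LEN, foo_complete, 0);
 *
 * On success, foo_complete() is called exactly once, in atomic context,
 * when the command completes, times out or is cancelled (see the
 * kernel-doc for efx_mcdi_rpc_async() above).
 */
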
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
			efx_dword_t *outbuf, size_t outlen,
			size_t *outlen_actual)
{
	return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
				    outlen_actual, false);
}

int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen,
			      efx_dword_t *outbuf, size_t outlen,
			      size_t *outlen_actual)
{
	return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
				    outlen_actual, true);
}

void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
			    size_t inlen, efx_dword_t *outbuf,
			    size_t outlen, int rc)
{
	int code = 0, err_arg = 0;

	if (outlen >= MC_CMD_ERR_CODE_OFST + 4)
		code = MCDI_DWORD(outbuf, ERR_CODE);
	if (outlen >= MC_CMD_ERR_ARG_OFST + 4)
		err_arg = MCDI_DWORD(outbuf, ERR_ARG);
	netif_err(efx, hw, efx->net_dev,
		  "MC command 0x%x inlen %d failed rc=%d (raw=%d) arg=%d\n",
		  cmd, (int)inlen, rc, code, err_arg);
}

/* Switch to polled MCDI completions.  This can be called in various
 * error conditions with various locks held, so it must be lockless.
 * Caller is responsible for flushing asynchronous requests later.
 */
void efx_mcdi_mode_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);
	/* If already in polling mode, nothing to do.
	 * If in fail-fast state, don't switch to polled completion.
	 * FLR recovery will do that later.
	 */
	if (mcdi->mode == MCDI_MODE_POLL || mcdi->mode == MCDI_MODE_FAIL)
		return;

	/* We can switch from event completion to polled completion, because
	 * mcdi requests are always completed in shared memory. We do this by
	 * switching the mode to POLL'd then completing the request.
	 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
	 *
	 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
	 * which efx_mcdi_complete_sync() provides for us.
	 */
	mcdi->mode = MCDI_MODE_POLL;

	efx_mcdi_complete_sync(mcdi);
}

/* Flush any running or queued asynchronous requests, after event processing
 * is stopped
 */
void efx_mcdi_flush_async(struct efx_nic *efx)
{
	struct efx_mcdi_async_param *async, *next;
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);

	/* We must be in poll or fail mode so no more requests can be queued */
	BUG_ON(mcdi->mode == MCDI_MODE_EVENTS);

	del_timer_sync(&mcdi->async_timer);

	/* If a request is still running, make sure we give the MC
	 * time to complete it so that the response won't overwrite our
	 * next request.
	 */
	if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) {
		efx_mcdi_poll(efx);
		mcdi->state = MCDI_STATE_QUIESCENT;
	}

	/* Nothing else will access the async list now, so it is safe
	 * to walk it without holding async_lock.  If we hold it while
	 * calling a completer then lockdep may warn that we have
	 * acquired locks in the wrong order.
	 */
	list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
		async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
		list_del(&async->list);
		kfree(async);
	}
}

void efx_mcdi_mode_event(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);
	/* If already in event completion mode, nothing to do.
	 * If in fail-fast state, don't switch to event completion.  FLR
	 * recovery will do that later.
	 */
	if (mcdi->mode == MCDI_MODE_EVENTS || mcdi->mode == MCDI_MODE_FAIL)
		return;

	/* We can't switch from polled to event completion in the middle of a
	 * request, because the completion method is specified in the request.
	 * So acquire the interface to serialise the requestors. We don't need
	 * to acquire the iface_lock to change the mode here, but we do need a
	 * write memory barrier to ensure that efx_mcdi_rpc() sees it, which
	 * efx_mcdi_acquire() provides.
	 */
	efx_mcdi_acquire_sync(mcdi);
	mcdi->mode = MCDI_MODE_EVENTS;
	efx_mcdi_release(mcdi);
}

static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	/* If there is an outstanding MCDI request, it has been terminated
	 * either by a BADASSERT or REBOOT event. If the mcdi interface is
	 * in polled mode, then do nothing because the MC reboot handler will
	 * set the header correctly. However, if the mcdi interface is waiting
	 * for a CMDDONE event it won't receive it [and since all MCDI events
	 * are sent to the same queue, we can't be racing with
	 * efx_mcdi_ev_cpl()]
	 *
	 * If there is an outstanding asynchronous request, we can't
	 * complete it now (efx_mcdi_complete() would deadlock).  The
	 * reset process will take care of this.
	 *
	 * There's a race here with efx_mcdi_send_request(), because
	 * we might receive a REBOOT event *before* the request has
	 * been copied out. In polled mode (during startup) this is
	 * irrelevant, because efx_mcdi_complete_sync() is ignored. In
	 * event mode, this condition is just an edge-case of
	 * receiving a REBOOT event after posting the MCDI
	 * request. Did the mc reboot before or after the copyout? The
	 * best we can do always is just return failure.
925 926
	 */
	spin_lock(&mcdi->iface_lock);
	if (efx_mcdi_complete_sync(mcdi)) {
		if (mcdi->mode == MCDI_MODE_EVENTS) {
			mcdi->resprc = rc;
			mcdi->resp_hdr_len = 0;
			mcdi->resp_data_len = 0;
			++mcdi->credits;
		}
	} else {
		int count;

		/* Consume the status word since efx_mcdi_rpc_finish() won't */
		for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
			if (efx_mcdi_poll_reboot(efx))
				break;
			udelay(MCDI_STATUS_DELAY_US);
		}
		mcdi->new_epoch = true;

		/* Nobody was waiting for an MCDI request, so trigger a reset */
		efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
	}

	spin_unlock(&mcdi->iface_lock);
}

/* The MC is going down in to BIST mode. set the BIST flag to block
 * new MCDI, cancel any outstanding MCDI and schedule a BIST-type reset
 * (which doesn't actually execute a reset, it waits for the controlling
 * function to reset it).
 */
static void efx_mcdi_ev_bist(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	spin_lock(&mcdi->iface_lock);
	efx->mc_bist_for_other_fn = true;
	if (efx_mcdi_complete_sync(mcdi)) {
		if (mcdi->mode == MCDI_MODE_EVENTS) {
			mcdi->resprc = -EIO;
			mcdi->resp_hdr_len = 0;
			mcdi->resp_data_len = 0;
			++mcdi->credits;
		}
	}
	mcdi->new_epoch = true;
	efx_schedule_reset(efx, RESET_TYPE_MC_BIST);
	spin_unlock(&mcdi->iface_lock);
}

/* MCDI timeouts seen, so make all MCDI calls fail-fast and issue an FLR to try
 * to recover.
 */
static void efx_mcdi_abandon(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	if (xchg(&mcdi->mode, MCDI_MODE_FAIL) == MCDI_MODE_FAIL)
		return; /* it had already been done */
	netif_dbg(efx, hw, efx->net_dev, "MCDI is timing out; trying to recover\n");
	efx_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT);
}

/* Called from falcon_process_eventq for MCDI events */
void efx_mcdi_process_event(struct efx_channel *channel,
			    efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
	u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);

	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		netif_err(efx, hw, efx->net_dev,
			  "MC watchdog or assertion failure at 0x%x\n", data);
		efx_mcdi_ev_death(efx, -EINTR);
		break;

	case MCDI_EVENT_CODE_PMNOTICE:
		netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(efx,
				MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
				MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
				MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
		break;

	case MCDI_EVENT_CODE_LINKCHANGE:
		efx_mcdi_process_link_change(efx, event);
		break;
	case MCDI_EVENT_CODE_SENSOREVT:
		efx_mcdi_sensor_event(efx, event);
		break;
	case MCDI_EVENT_CODE_SCHEDERR:
		netif_dbg(efx, hw, efx->net_dev,
			  "MC Scheduler alert (0x%x)\n", data);
		break;
	case MCDI_EVENT_CODE_REBOOT:
	case MCDI_EVENT_CODE_MC_REBOOT:
		netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
		efx_mcdi_ev_death(efx, -EIO);
		break;
	case MCDI_EVENT_CODE_MC_BIST:
		netif_info(efx, hw, efx->net_dev, "MC entered BIST mode\n");
		efx_mcdi_ev_bist(efx);
		break;
	case MCDI_EVENT_CODE_MAC_STATS_DMA:
		/* MAC stats are gathered lazily.  We can ignore this. */
		break;
	case MCDI_EVENT_CODE_FLR:
		if (efx->type->sriov_flr)
			efx->type->sriov_flr(efx,
					     MCDI_EVENT_FIELD(*event, FLR_VF));
		break;
	case MCDI_EVENT_CODE_PTP_RX:
	case MCDI_EVENT_CODE_PTP_FAULT:
	case MCDI_EVENT_CODE_PTP_PPS:
		efx_ptp_event(efx, event);
		break;
	case MCDI_EVENT_CODE_PTP_TIME:
		efx_time_sync_event(channel, event);
		break;
	case MCDI_EVENT_CODE_TX_FLUSH:
	case MCDI_EVENT_CODE_RX_FLUSH:
		/* Two flush events will be sent: one to the same event
		 * queue as completions, and one to event queue 0.
		 * In the latter case the {RX,TX}_FLUSH_TO_DRIVER
		 * flag will be set, and we should ignore the event
		 * because we want to wait for all completions.
		 */
		BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN !=
			     MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN);
		if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER))
			efx_ef10_handle_drain_event(efx);
		break;
	case MCDI_EVENT_CODE_TX_ERR:
	case MCDI_EVENT_CODE_RX_ERR:
		netif_err(efx, hw, efx->net_dev,
			  "%s DMA error (event: "EFX_QWORD_FMT")\n",
			  code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
			  EFX_QWORD_VAL(*event));
		efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		break;
	default:
		netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
			  code);
	}
}

/**************************************************************************
 *
 * Specific request functions
 *
 **************************************************************************
 */

void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
	size_t outlength;
	const __le16 *ver_words;
	size_t offset;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
			  outbuf, sizeof(outbuf), &outlength);
	if (rc)
		goto fail;
	if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
	offset = snprintf(buf, len, "%u.%u.%u.%u",
			  le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
			  le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));

	/* EF10 may have multiple datapath firmware variants within a
	 * single version.  Report which variants are running.
	 */
	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
		struct efx_ef10_nic_data *nic_data = efx->nic_data;

		offset += snprintf(buf + offset, len - offset, " rx%x tx%x",
				   nic_data->rx_dpcpu_fw_id,
				   nic_data->tx_dpcpu_fw_id);

		/* It's theoretically possible for the string to exceed 31
		 * characters, though in practice the first three version
		 * components are short enough that this doesn't happen.
		 */
		if (WARN_ON(offset >= len))
			buf[0] = 0;
	}

	return;

fail:
	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	buf[0] = 0;
}

static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
			       bool *was_attached)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
		       driver_operating ? 1 : 0);
	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);
	/* If we're not the primary PF, trying to ATTACH with a FIRMWARE_ID
	 * specified will fail with EPERM, and we have to tell the MC we don't
	 * care what firmware we get.
	 */
	if (rc == -EPERM) {
		netif_dbg(efx, probe, efx->net_dev,
			  "efx_mcdi_drv_attach with fw-variant setting failed EPERM, trying without it\n");
		MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID,
			       MC_CMD_FW_DONT_CARE);
		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf,
					sizeof(inbuf), outbuf, sizeof(outbuf),
					&outlen);
	}
	if (rc) {
		efx_mcdi_display_error(efx, MC_CMD_DRV_ATTACH, sizeof(inbuf),
				       outbuf, outlen, rc);
		goto fail;
	}
	if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	if (driver_operating) {
		if (outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN) {
			efx->mcdi->fn_flags =
				MCDI_DWORD(outbuf,
					   DRV_ATTACH_EXT_OUT_FUNC_FLAGS);
		} else {
			/* Synthesise flags for Siena */
			efx->mcdi->fn_flags =
				1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
				1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED |
				(efx_port_num(efx) == 0) <<
				MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY;
		}
	}

	/* We currently assume we have control of the external link
	 * and are completely trusted by firmware.  Abort probing
	 * if that's not true for this function.
	 */

	if (was_attached != NULL)
		*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
	return 0;

fail:
	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
			   u16 *fw_subtype_list, u32 *capabilities)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
	size_t outlen, i;
	int port_num = efx_port_num(efx);
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
	/* we need __aligned(2) for ether_addr_copy */
	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST & 1);
	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST & 1);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
		rc = -EIO;
		goto fail;
	}

	if (mac_address)
		ether_addr_copy(mac_address,
				port_num ?
				MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
				MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0));
	if (fw_subtype_list) {
		for (i = 0;
		     i < MCDI_VAR_ARRAY_LEN(outlen,
					    GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
		     i++)
			fw_subtype_list[i] = MCDI_ARRAY_WORD(
				outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
		for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
			fw_subtype_list[i] = 0;
	}
	if (capabilities) {
		if (port_num)
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
		else
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
		  __func__, rc, (int)outlen);

	return rc;
}

int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
	u32 dest = 0;
	int rc;

	if (uart)
		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
	if (evq)
		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;

	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);

	BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	return rc;
}

int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
		  __func__, rc);
	return rc;
}

int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
			size_t *size_out, size_t *erase_size_out,
			bool *protected_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
	*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
	*protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
1325
				(1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
1326 1327 1328
	return 0;

fail:
1329
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1330 1331 1332
	return rc;
}

static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), NULL);
	if (rc)
		return rc;

	switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
	case MC_CMD_NVRAM_TEST_PASS:
	case MC_CMD_NVRAM_TEST_NOTSUPP:
		return 0;
	default:
		return -EIO;
	}
}

int efx_mcdi_nvram_test_all(struct efx_nic *efx)
{
	u32 nvram_types;
	unsigned int type;
	int rc;

	rc = efx_mcdi_nvram_types(efx, &nvram_types);
	if (rc)
		goto fail1;

	type = 0;
	while (nvram_types != 0) {
		if (nvram_types & 1) {
			rc = efx_mcdi_nvram_test(efx, type);
			if (rc)
				goto fail2;
		}
		type++;
		nvram_types >>= 1;
	}

	return 0;

fail2:
	netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
		  __func__, type);
fail1:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

/* Returns 1 if an assertion was read, 0 if no assertion had fired,
 * negative on error.
 */
static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
	MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
	unsigned int flags, index;
	const char *reason;
	size_t outlen;
	int retry;
	int rc;

	/* Attempt to read any stored assertion state before we reboot
	 * the mcfw out of the assertion handler. Retry twice, once
	 * because a boot-time assertion might cause this command to fail
	 * with EINTR. And once again because GET_ASSERTS can race with
	 * MC_CMD_REBOOT running on the other port. */
	retry = 2;
	do {
		MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS,
					inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
					outbuf, sizeof(outbuf), &outlen);
		if (rc == -EPERM)
			return 0;
	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);

	if (rc) {
		efx_mcdi_display_error(efx, MC_CMD_GET_ASSERTS,
				       MC_CMD_GET_ASSERTS_IN_LEN, outbuf,
				       outlen, rc);
		return rc;
	}
	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
		return -EIO;

	/* Print out any recorded assertion state */
	flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
		return 0;

	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
		? "system-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
		? "thread-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
		? "watchdog reset"
		: "unknown assertion";
	netif_err(efx, hw, efx->net_dev,
		  "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));

	/* Print out the registers */
	for (index = 0;
	     index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
	     index++)
		netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
			  1 + index,
			  MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
					   index));

	return 1;
}

static int efx_mcdi_exit_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
	int rc;

	/* If the MC is running debug firmware, it might now be
	 * waiting for a debugger to attach, but we just want it to
	 * reboot.  We set a flag that makes the command a no-op if it
	 * has already done so.
	 * The MCDI will thus return either 0 or -EIO.
	 */
	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
		       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
				NULL, 0, NULL);
	if (rc == -EIO)
		rc = 0;
	if (rc)
		efx_mcdi_display_error(efx, MC_CMD_REBOOT, MC_CMD_REBOOT_IN_LEN,
				       NULL, 0, rc);
	return rc;
}

int efx_mcdi_handle_assertion(struct efx_nic *efx)
{
	int rc;

	rc = efx_mcdi_read_assertion(efx);
	if (rc <= 0)
		return rc;

	return efx_mcdi_exit_assertion(efx);
}

void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
	int rc;

	BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
	BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
	BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);

	BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);

	rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
}

static int efx_mcdi_reset_func(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_ENTITY_RESET_IN_LEN);
	int rc;

	BUILD_BUG_ON(MC_CMD_ENTITY_RESET_OUT_LEN != 0);
	MCDI_POPULATE_DWORD_1(inbuf, ENTITY_RESET_IN_FLAG,
			      ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
	rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	return rc;
}

static int efx_mcdi_reset_mc(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
	int rc;

	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	/* White is black, and up is down */
	if (rc == -EIO)
		return 0;
	if (rc == 0)
		rc = -EIO;
	return rc;
}

enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason)
{
	return RESET_TYPE_RECOVER_OR_ALL;
}

int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
{
	int rc;

	/* If MCDI is down, we can't handle_assertion */
	if (method == RESET_TYPE_MCDI_TIMEOUT) {
		rc = pci_reset_function(efx->pci_dev);
		if (rc)
			return rc;
		/* Re-enable polled MCDI completion */
		if (efx->mcdi) {
			struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
			mcdi->mode = MCDI_MODE_POLL;
		}
		return 0;
	}

	/* Recover from a failed assertion pre-reset */
	rc = efx_mcdi_handle_assertion(efx);
	if (rc)
		return rc;

	if (method == RESET_TYPE_WORLD)
		return efx_mcdi_reset_mc(efx);
	else
1564
		return efx_mcdi_reset_func(efx);
1565 1566
}

S
stephen hemminger 已提交
1567 1568
static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
				   const u8 *mac, int *id_out)
1569
{
1570 1571
	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
1572 1573 1574 1575 1576 1577
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
		       MC_CMD_FILTER_MODE_SIMPLE);
1578
	ether_addr_copy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac);
1579 1580 1581 1582 1583 1584 1585

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
1586
		rc = -EIO;
1587 1588 1589 1590 1591 1592 1593 1594 1595
		goto fail;
	}

	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);

	return 0;

fail:
	*id_out = -1;
1596
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610
	return rc;

}


int
efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,  const u8 *mac, int *id_out)
{
	return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
}


int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
{
1611
	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
1612 1613 1614 1615 1616 1617 1618 1619 1620
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
1621
		rc = -EIO;
1622 1623 1624 1625 1626 1627 1628 1629 1630
		goto fail;
	}

	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);

	return 0;

fail:
	*id_out = -1;
1631
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1632 1633 1634 1635 1636 1637
	return rc;
}


int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
{
1638
	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
1639 1640 1641 1642 1643 1644 1645 1646 1647
	int rc;

	MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	return rc;
}

int efx_mcdi_flush_rxqs(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
	int rc, count;

	BUILD_BUG_ON(EFX_MAX_CHANNELS >
		     MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);

	count = 0;
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			if (rx_queue->flush_pending) {
				rx_queue->flush_pending = false;
				atomic_dec(&efx->rxq_flush_pending);
1665 1666 1667 1668
				MCDI_SET_ARRAY_DWORD(
					inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
					count, efx_rx_queue_index(rx_queue));
				count++;
1669 1670 1671 1672
			}
		}
	}

	rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
			  MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
	WARN_ON(rc < 0);

	return rc;
}

int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
{
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
	return rc;
}

int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);

	BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled);
	return efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
			     unsigned int *enabled_out)
{
	MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	if (impl_out)
		*impl_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_IMPLEMENTED);

	if (enabled_out)
		*enabled_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED);

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

#ifdef CONFIG_SFC_MTD

#define EFX_MCDI_NVRAM_LEN_MAX 128

static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);

	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	return rc;
}

static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
			       loff_t offset, u8 *buffer, size_t length)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf,
			 MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;

	memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
	return 0;
}

static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
				loff_t offset, const u8 *buffer, size_t length)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
	memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);

	BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
			  ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
			  NULL, 0, NULL);
	return rc;
}

static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
				loff_t offset, size_t length)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);

	BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	return rc;
}

static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);

	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	return rc;
}

int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
		      size_t len, size_t *retlen, u8 *buffer)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start;
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk;
	int rc = 0;

	while (offset < end) {
		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
		rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset,
					 buffer, chunk);
		if (rc)
			goto out;
		offset += chunk;
		buffer += chunk;
	}
out:
	*retlen = offset - start;
	return rc;
}

int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk = part->common.mtd.erasesize;
	int rc = 0;

	if (!part->updating) {
		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
		if (rc)
			goto out;
		part->updating = true;
	}

	/* The MCDI interface can in fact do multiple erase blocks at once;
	 * but erasing may be slow, so we make multiple calls here to avoid
	 * tripping the MCDI RPC timeout. */
	while (offset < end) {
		rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
					  chunk);
		if (rc)
			goto out;
		offset += chunk;
	}
out:
	return rc;
}

int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
		       size_t len, size_t *retlen, const u8 *buffer)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start;
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk;
	int rc = 0;

	if (!part->updating) {
		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
		if (rc)
			goto out;
		part->updating = true;
	}

	while (offset < end) {
		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
		rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
					  buffer, chunk);
		if (rc)
			goto out;
		offset += chunk;
		buffer += chunk;
	}
out:
	*retlen = offset - start;
	return rc;
}

int efx_mcdi_mtd_sync(struct mtd_info *mtd)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	int rc = 0;

	if (part->updating) {
		part->updating = false;
		rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
	}

	return rc;
}

void efx_mcdi_mtd_rename(struct efx_mtd_partition *part)
{
	struct efx_mcdi_mtd_partition *mcdi_part =
		container_of(part, struct efx_mcdi_mtd_partition, common);
	struct efx_nic *efx = part->mtd.priv;

	snprintf(part->name, sizeof(part->name), "%s %s:%02x",
		 efx->name, part->type_name, mcdi_part->fw_subtype);
}

#endif /* CONFIG_SFC_MTD */