/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2009 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
16
#include <linux/i2c.h>
B
Ben Hutchings 已提交
17
#include <linux/mii.h>
18
#include <linux/slab.h>
19 20 21 22 23
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "mac.h"
#include "spi.h"
B
Ben Hutchings 已提交
24
#include "nic.h"
25
#include "regs.h"
26
#include "io.h"
27 28 29 30
#include "mdio_10g.h"
#include "phy.h"
#include "workarounds.h"

B
Ben Hutchings 已提交
31
/* Hardware control for SFC4000 (aka Falcon). */
32

33 34 35 36 37 38 39 40 41 42 43 44 45 46
/* Packed SPI device-type descriptors.  Size/erase/block fields are
 * log2 values (e.g. 13 -> 8 KB) shifted to the SPI_DEV_TYPE_*_LBN
 * bit positions. */
static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
 * 8 KB, 16-bit address, 32 B write block */
large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
		     | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		     | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
/* Default flash device: Atmel AT25F1024
 * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */
default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
		      | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		      | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
		      | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
		      | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));

47 48 49 50 51 52 53 54
/**************************************************************************
 *
 * I2C bus - this is a bit-bashing interface using GPIO pins
 * Note that it uses the output enables to tristate the outputs
 * SDA is the data pin and SCL is the clock
 *
 **************************************************************************
 */
55
static void falcon_setsda(void *data, int state)
56
{
57
	struct efx_nic *efx = (struct efx_nic *)data;
58 59
	efx_oword_t reg;

60
	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
61
	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
62
	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
63 64
}

65
static void falcon_setscl(void *data, int state)
66
{
67
	struct efx_nic *efx = (struct efx_nic *)data;
68 69
	efx_oword_t reg;

70
	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
71
	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
72
	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
73 74
}

75 76 77 78
static int falcon_getsda(void *data)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;
79

80 81 82
	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
}
83

84 85 86 87
static int falcon_getscl(void *data)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;
88

89 90
	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
91 92
}

93 94 95 96 97 98 99 100 101 102
/* Bit-banged I2C over the GPIO pins, via the kernel's i2c-algo-bit
 * layer; the callbacks above do the actual pin twiddling. */
static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
	.setsda		= falcon_setsda,
	.setscl		= falcon_setscl,
	.getsda		= falcon_getsda,
	.getscl		= falcon_getscl,
	.udelay		= 5,
	/* Wait up to 50 ms for slave to let us pull SCL high */
	.timeout	= DIV_ROUND_UP(HZ, 20),
};

103
static void falcon_push_irq_moderation(struct efx_channel *channel)
104 105 106 107 108 109 110
{
	efx_dword_t timer_cmd;
	struct efx_nic *efx = channel->efx;

	/* Set timer register */
	if (channel->irq_moderation) {
		EFX_POPULATE_DWORD_2(timer_cmd,
111 112 113
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_INT_HLDOFF,
				     FRF_AB_TC_TIMER_VAL,
114
				     channel->irq_moderation - 1);
115 116
	} else {
		EFX_POPULATE_DWORD_2(timer_cmd,
117 118 119
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_DIS,
				     FRF_AB_TC_TIMER_VAL, 0);
120
	}
121
	BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
122 123
	efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
			       channel->channel);
B
Ben Hutchings 已提交
124 125
}

B
Ben Hutchings 已提交
126 127
static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx);

B
Ben Hutchings 已提交
128 129 130 131 132 133 134 135
/* Quiesce the MAC datapath before DMA queue flushes begin. */
static void falcon_prepare_flush(struct efx_nic *efx)
{
	falcon_deconfigure_mac_wrapper(efx);

	/* Wait for the tx and rx fifo's to get to the next packet boundary
	 * (~1ms without back-pressure), then to drain the remainder of the
	 * fifo's at data path speeds (negligible), with a healthy margin. */
	msleep(10);
}

138 139 140 141 142 143 144 145 146 147
/* Acknowledge a legacy interrupt from Falcon
 *
 * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
 *
 * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
 * BIU. Interrupt acknowledge is read sensitive so must write instead
 * (then read to ensure the BIU collector is flushed)
 *
 * NB most hardware supports MSI interrupts
 */
148
inline void falcon_irq_ack_a1(struct efx_nic *efx)
149 150 151
{
	efx_dword_t reg;

152
	EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
153 154
	efx_writed(efx, &reg, FR_AA_INT_ACK_KER);
	efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
155 156 157
}


158
irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
159
{
160 161
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
162 163 164 165 166 167 168
	int syserr;
	int queues;

	/* Check to see if this is our interrupt.  If it isn't, we
	 * exit without having touched the hardware.
	 */
	if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
169 170 171
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d not for me\n", irq,
			   raw_smp_processor_id());
172 173 174
		return IRQ_NONE;
	}
	efx->last_irq_cpu = raw_smp_processor_id();
175 176 177
	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
178 179 180 181

	/* Determine interrupting queues, clear interrupt status
	 * register and acknowledge the device interrupt.
	 */
182 183
	BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS);
	queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
184 185 186 187 188 189 190 191

	/* Check to see if we have a serious error condition */
	if (queues & (1U << efx->fatal_irq_level)) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
	}

192 193 194 195
	EFX_ZERO_OWORD(*int_ker);
	wmb(); /* Ensure the vector is cleared before interrupt ack */
	falcon_irq_ack_a1(efx);

196 197 198 199
	if (queues & 1)
		efx_schedule_channel(efx_get_channel(efx, 0));
	if (queues & 2)
		efx_schedule_channel(efx_get_channel(efx, 1));
200 201 202 203 204 205 206 207 208
	return IRQ_HANDLED;
}
/**************************************************************************
 *
 * EEPROM/flash
 *
 **************************************************************************
 */

209
#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)
210

211 212 213
static int falcon_spi_poll(struct efx_nic *efx)
{
	efx_oword_t reg;
214
	efx_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
215
	return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
216 217
}

218 219 220
/* Wait for SPI command completion */
static int falcon_spi_wait(struct efx_nic *efx)
{
221 222 223 224 225 226 227 228 229 230 231 232
	/* Most commands will finish quickly, so we start polling at
	 * very short intervals.  Sometimes the command may have to
	 * wait for VPD or expansion ROM access outside of our
	 * control, so we allow up to 100 ms. */
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
	int i;

	for (i = 0; i < 10; i++) {
		if (!falcon_spi_poll(efx))
			return 0;
		udelay(10);
	}
233

234
	for (;;) {
235
		if (!falcon_spi_poll(efx))
236
			return 0;
237
		if (time_after_eq(jiffies, timeout)) {
238 239
			netif_err(efx, hw, efx->net_dev,
				  "timed out waiting for SPI\n");
240 241
			return -ETIMEDOUT;
		}
242
		schedule_timeout_uninterruptible(1);
243
	}
244 245
}

246
/* Issue a single SPI command to the device selected by @spi.
 * @address: device address, or negative for commands with no address
 *	phase.
 * @in: data to write (or NULL); @out: buffer for data read (or NULL).
 * @len: data length, at most FALCON_SPI_MAX_LEN bytes.
 * Caller must hold efx->spi_lock.  Returns 0 or a negative error code.
 */
int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
		   unsigned int command, int address,
		   const void *in, void *out, size_t len)
{
	bool addressed = (address >= 0);
	bool reading = (out != NULL);
	efx_oword_t reg;
	int rc;

	/* Input validation */
	if (len > FALCON_SPI_MAX_LEN)
		return -EINVAL;
	BUG_ON(!mutex_is_locked(&efx->spi_lock));

	/* Check that previous command is not still running */
	rc = falcon_spi_poll(efx);
	if (rc)
		return rc;

	/* Program address register, if we have an address */
	if (addressed) {
		EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
		efx_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
	}

	/* Program data register, if we have data */
	if (in != NULL) {
		memcpy(&reg, in, len);
		efx_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
	}

	/* Issue read/write command */
	EFX_POPULATE_OWORD_7(reg,
			     FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
			     FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
			     FRF_AB_EE_SPI_HCMD_DABCNT, len,
			     FRF_AB_EE_SPI_HCMD_READ, reading,
			     FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
			     FRF_AB_EE_SPI_HCMD_ADBCNT,
			     (addressed ? spi->addr_len : 0),
			     FRF_AB_EE_SPI_HCMD_ENC, command);
	efx_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);

	/* Wait for read/write to complete */
	rc = falcon_spi_wait(efx);
	if (rc)
		return rc;

	/* Read data */
	if (out != NULL) {
		efx_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
		memcpy(out, &reg, len);
	}

	return 0;
}

303 304
/* Largest write that may start at @start without crossing a device
 * write-block boundary; never more than FALCON_SPI_MAX_LEN. */
static size_t
falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
{
	size_t to_block_end = spi->block_size - (start & (spi->block_size - 1));

	return min(FALCON_SPI_MAX_LEN, to_block_end);
}

/* Fold address bit 8 into the command opcode (bit 3) for small devices
 * that encode the high address bit there; spi->munge_address selects
 * whether this applies (0 disables the munging entirely). */
static inline u8
efx_spi_munge_command(const struct efx_spi_device *spi,
		      const u8 command, const unsigned int address)
{
	return command | (((address >> 8) & spi->munge_address) << 3);
}

317
/* Wait up to 10 ms for buffered write completion */
318 319
int
falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi)
320
{
321
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
322
	u8 status;
323
	int rc;
324

325
	for (;;) {
326
		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
327 328 329 330 331
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
332
		if (time_after_eq(jiffies, timeout)) {
333 334 335 336
			netif_err(efx, hw, efx->net_dev,
				  "SPI write timeout on device %d"
				  " last status=0x%02x\n",
				  spi->device_id, status);
337 338 339
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
340 341 342
	}
}

343 344
/* Read @len bytes at @start from @spi into @buffer, in chunks of at
 * most FALCON_SPI_MAX_LEN.  *@retlen (if non-NULL) receives the number
 * of bytes actually read.  Returns 0, -EINTR on signal, or the first
 * command error. */
int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
		    loff_t start, size_t len, size_t *retlen, u8 *buffer)
{
	size_t chunk, done = 0;
	unsigned int command;
	int rc = 0;

	while (done < len) {
		chunk = min(len - done, FALCON_SPI_MAX_LEN);

		command = efx_spi_munge_command(spi, SPI_READ, start + done);
		rc = falcon_spi_cmd(efx, spi, command, start + done, NULL,
				    buffer + done, chunk);
		if (rc)
			break;
		done += chunk;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
	}

	if (retlen)
		*retlen = done;
	return rc;
}

373 374 375
/* Write @len bytes from @buffer to @spi at @start, honouring the
 * device's write-block boundaries, and verify each block by reading it
 * back.  *@retlen (if non-NULL) receives the number of bytes written.
 * Returns 0, -EIO on verify mismatch, -EINTR on signal, or the first
 * command error.
 *
 * Fix: the return code of the verify read-back command was previously
 * ignored, so a failed read could compare stale verify_buffer contents
 * and silently mask an I/O error. */
int
falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
		 loff_t start, size_t len, size_t *retlen, const u8 *buffer)
{
	u8 verify_buffer[FALCON_SPI_MAX_LEN];
	size_t block_len, pos = 0;
	unsigned int command;
	int rc = 0;

	while (pos < len) {
		/* Each write command must be preceded by a write enable */
		rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
		if (rc)
			break;

		block_len = min(len - pos,
				falcon_spi_write_limit(spi, start + pos));
		command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
		rc = falcon_spi_cmd(efx, spi, command, start + pos,
				    buffer + pos, NULL, block_len);
		if (rc)
			break;

		rc = falcon_spi_wait_write(efx, spi);
		if (rc)
			break;

		/* Read the block back and verify it */
		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
		rc = falcon_spi_cmd(efx, spi, command, start + pos,
				    NULL, verify_buffer, block_len);
		if (rc)
			break;
		if (memcmp(verify_buffer, buffer + pos, block_len)) {
			rc = -EIO;
			break;
		}

		pos += block_len;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
	}

	if (retlen)
		*retlen = pos;
	return rc;
}

422 423 424 425 426 427
/**************************************************************************
 *
 * MAC wrapper
 *
 **************************************************************************
 */
428

429 430 431 432 433 434 435 436 437 438
/* Write the software multicast hash filter to the two MAC hash
 * registers.  Caller must hold efx->mac_lock (asserted below). */
static void falcon_push_multicast_hash(struct efx_nic *efx)
{
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
	efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
}

B
Ben Hutchings 已提交
439
static void falcon_reset_macs(struct efx_nic *efx)
440
{
B
Ben Hutchings 已提交
441 442
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg, mac_ctrl;
443 444
	int count;

445
	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
446 447 448 449
		/* It's not safe to use GLB_CTL_REG to reset the
		 * macs, so instead use the internal MAC resets
		 */
		if (!EFX_IS10G(efx)) {
450
			EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 1);
451
			efx_writeo(efx, &reg, FR_AB_GM_CFG1);
452 453
			udelay(1000);

454
			EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 0);
455
			efx_writeo(efx, &reg, FR_AB_GM_CFG1);
456
			udelay(1000);
B
Ben Hutchings 已提交
457
			return;
458
		} else {
459
			EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
460
			efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
461 462

			for (count = 0; count < 10000; count++) {
463
				efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
464 465
				if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
				    0)
B
Ben Hutchings 已提交
466
					return;
467 468
				udelay(10);
			}
469

470 471
			netif_err(efx, hw, efx->net_dev,
				  "timed out waiting for XMAC core reset\n");
472 473
		}
	}
474

B
Ben Hutchings 已提交
475 476
	/* Mac stats will fail whist the TX fifo is draining */
	WARN_ON(nic_data->stats_disable_count == 0);
477

B
Ben Hutchings 已提交
478 479 480
	efx_reado(efx, &mac_ctrl, FR_AB_MAC_CTRL);
	EFX_SET_OWORD_FIELD(mac_ctrl, FRF_BB_TXFIFO_DRAIN_EN, 1);
	efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
481

482
	efx_reado(efx, &reg, FR_AB_GLB_CTL);
483 484 485
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
486
	efx_writeo(efx, &reg, FR_AB_GLB_CTL);
487 488 489

	count = 0;
	while (1) {
490
		efx_reado(efx, &reg, FR_AB_GLB_CTL);
491 492 493
		if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
494 495 496
			netif_dbg(efx, hw, efx->net_dev,
				  "Completed MAC reset after %d loops\n",
				  count);
497 498 499
			break;
		}
		if (count > 20) {
500
			netif_err(efx, hw, efx->net_dev, "MAC reset failed\n");
501 502 503 504 505 506
			break;
		}
		count++;
		udelay(10);
	}

B
Ben Hutchings 已提交
507 508 509
	/* Ensure the correct MAC is selected before statistics
	 * are re-enabled by the caller */
	efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
510 511 512

	/* This can run even when the GMAC is selected */
	falcon_setup_xaui(efx);
513 514 515 516 517 518
}

void falcon_drain_tx_fifo(struct efx_nic *efx)
{
	efx_oword_t reg;

519
	if ((efx_nic_rev(efx) < EFX_REV_FALCON_B0) ||
520 521 522
	    (efx->loopback_mode != LOOPBACK_NONE))
		return;

523
	efx_reado(efx, &reg, FR_AB_MAC_CTRL);
524
	/* There is no point in draining more than once */
525
	if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
526 527 528
		return;

	falcon_reset_macs(efx);
529 530
}

B
Ben Hutchings 已提交
531
static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
532
{
533
	efx_oword_t reg;
534

535
	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
536 537 538
		return;

	/* Isolate the MAC -> RX */
539
	efx_reado(efx, &reg, FR_AZ_RX_CFG);
540
	EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
541
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
542

B
Ben Hutchings 已提交
543 544
	/* Isolate TX -> MAC */
	falcon_drain_tx_fifo(efx);
545 546 547 548
}

void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
{
549
	struct efx_link_state *link_state = &efx->link_state;
550
	efx_oword_t reg;
551 552 553
	int link_speed, isolate;

	isolate = (efx->reset_pending != RESET_TYPE_NONE);
554

555
	switch (link_state->speed) {
B
Ben Hutchings 已提交
556 557 558 559 560
	case 10000: link_speed = 3; break;
	case 1000:  link_speed = 2; break;
	case 100:   link_speed = 1; break;
	default:    link_speed = 0; break;
	}
561 562 563 564 565
	/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
	 * as advertised.  Disable to ensure packets are not
	 * indefinitely held and TX queue can be flushed at any point
	 * while the link is down. */
	EFX_POPULATE_OWORD_5(reg,
566 567 568 569 570
			     FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
			     FRF_AB_MAC_BCAD_ACPT, 1,
			     FRF_AB_MAC_UC_PROM, efx->promiscuous,
			     FRF_AB_MAC_LINK_STATUS, 1, /* always set */
			     FRF_AB_MAC_SPEED, link_speed);
571 572
	/* On B0, MAC backpressure can be disabled and packets get
	 * discarded. */
573
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
574
		EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
575
				    !link_state->up || isolate);
576 577
	}

578
	efx_writeo(efx, &reg, FR_AB_MAC_CTRL);
579 580

	/* Restore the multicast hash registers. */
581
	falcon_push_multicast_hash(efx);
582

583
	efx_reado(efx, &reg, FR_AZ_RX_CFG);
584 585 586
	/* Enable XOFF signal from RX FIFO (we enabled it during NIC
	 * initialisation but it may read back as 0) */
	EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
587
	/* Unisolate the MAC -> RX */
588
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
589
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate);
590
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
591 592
}

593
static void falcon_stats_request(struct efx_nic *efx)
594
{
595
	struct falcon_nic_data *nic_data = efx->nic_data;
596 597
	efx_oword_t reg;

598 599
	WARN_ON(nic_data->stats_pending);
	WARN_ON(nic_data->stats_disable_count);
600

601 602
	if (nic_data->stats_dma_done == NULL)
		return;	/* no mac selected */
603

604 605
	*nic_data->stats_dma_done = FALCON_STATS_NOT_DONE;
	nic_data->stats_pending = true;
606 607 608 609
	wmb(); /* ensure done flag is clear */

	/* Initiate DMA transfer of stats */
	EFX_POPULATE_OWORD_2(reg,
610 611
			     FRF_AB_MAC_STAT_DMA_CMD, 1,
			     FRF_AB_MAC_STAT_DMA_ADR,
612
			     efx->stats_buffer.dma_addr);
613
	efx_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);
614

615 616 617 618 619 620 621 622 623 624 625 626 627 628 629
	mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
}

static void falcon_stats_complete(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (!nic_data->stats_pending)
		return;

	nic_data->stats_pending = 0;
	if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
		rmb(); /* read the done flag before the stats */
		efx->mac_op->update_stats(efx);
	} else {
630 631
		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for statistics\n");
632
	}
633
}
634

635 636 637 638 639 640 641 642 643 644 645 646
/* Periodic timer: collect any completed stats DMA and, unless stats
 * are disabled, kick off the next one. */
static void falcon_stats_timer_func(unsigned long context)
{
	struct efx_nic *efx = (struct efx_nic *)context;
	struct falcon_nic_data *nic_data = efx->nic_data;

	spin_lock(&efx->stats_lock);

	falcon_stats_complete(efx);
	if (nic_data->stats_disable_count == 0)
		falcon_stats_request(efx);

	spin_unlock(&efx->stats_lock);
}

B
Ben Hutchings 已提交
649 650
static void falcon_switch_mac(struct efx_nic *efx);

S
Steve Hodgson 已提交
651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669
/* Synthesise a link state for internal loopback modes, where there is
 * no real link to poll.  Returns true if the state changed.
 * Caller must hold efx->mac_lock (asserted below). */
static bool falcon_loopback_link_poll(struct efx_nic *efx)
{
	struct efx_link_state old_state = efx->link_state;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	/* Loopback links are always full-duplex and "up" */
	efx->link_state.fd = true;
	efx->link_state.fc = efx->wanted_fc;
	efx->link_state.up = true;

	if (efx->loopback_mode == LOOPBACK_GMAC)
		efx->link_state.speed = 1000;
	else
		efx->link_state.speed = 10000;

	return !efx_link_state_equal(&efx->link_state, &old_state);
}

B
Ben Hutchings 已提交
670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701
/* Reconfigure the PHY, MAC selection and MAC to match the current
 * link/loopback settings.  Always returns 0; a MAC reconfigure
 * failure is treated as fatal (BUG_ON below). */
static int falcon_reconfigure_port(struct efx_nic *efx)
{
	int rc;

	WARN_ON(efx_nic_rev(efx) > EFX_REV_FALCON_B0);

	/* Poll the PHY link state *before* reconfiguring it. This means we
	 * will pick up the correct speed (in loopback) to select the correct
	 * MAC.
	 */
	if (LOOPBACK_INTERNAL(efx))
		falcon_loopback_link_poll(efx);
	else
		efx->phy_op->poll(efx);

	/* Stats must be stopped while the MAC is switched/reset */
	falcon_stop_nic_stats(efx);
	falcon_deconfigure_mac_wrapper(efx);

	falcon_switch_mac(efx);

	efx->phy_op->reconfigure(efx);
	rc = efx->mac_op->reconfigure(efx);
	BUG_ON(rc);

	falcon_start_nic_stats(efx);

	/* Synchronise efx->link_state with the kernel */
	efx_link_status_changed(efx);

	return 0;
}

702 703 704 705 706 707 708 709 710 711
/**************************************************************************
 *
 * PHY access via GMII
 *
 **************************************************************************
 */

/* Wait for GMII access to complete */
static int falcon_gmii_wait(struct efx_nic *efx)
{
712
	efx_oword_t md_stat;
713 714
	int count;

715 716
	/* wait upto 50ms - taken max from datasheet */
	for (count = 0; count < 5000; count++) {
717 718 719 720
		efx_reado(efx, &md_stat, FR_AB_MD_STAT);
		if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
			if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
			    EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
721 722 723 724
				netif_err(efx, hw, efx->net_dev,
					  "error from GMII access "
					  EFX_OWORD_FMT"\n",
					  EFX_OWORD_VAL(md_stat));
725 726 727 728 729 730
				return -EIO;
			}
			return 0;
		}
		udelay(10);
	}
731
	netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n");
732 733 734
	return -ETIMEDOUT;
}

735 736 737
/* Write an MDIO register of a PHY connected to Falcon. */
static int falcon_mdio_write(struct net_device *net_dev,
			     int prtad, int devad, u16 addr, u16 value)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	efx_oword_t reg;
	int rc;

	netif_vdbg(efx, hw, efx->net_dev,
		   "writing MDIO %d register %d.%d with 0x%04x\n",
		    prtad, devad, addr, value);

	mutex_lock(&efx->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	/* Write the address/ID register */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	efx_writeo(efx, &reg, FR_AB_MD_ID);

	/* Write data */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
	efx_writeo(efx, &reg, FR_AB_MD_TXD);

	/* Start the write cycle */
	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_MD_WRC, 1,
			     FRF_AB_MD_GC, 0);
	efx_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to be written */
	rc = falcon_gmii_wait(efx);
	if (rc) {
		/* Abort the write operation */
		EFX_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_WRC, 0,
				     FRF_AB_MD_GC, 1);
		efx_writeo(efx, &reg, FR_AB_MD_CS);
		udelay(10);
	}

out:
	mutex_unlock(&efx->mdio_lock);
	return rc;
}

787 788 789
/* Read an MDIO register of a PHY connected to Falcon. */
static int falcon_mdio_read(struct net_device *net_dev,
			    int prtad, int devad, u16 addr)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	efx_oword_t reg;
	int rc;

	mutex_lock(&efx->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	/* Write the address/ID register */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	efx_writeo(efx, &reg, FR_AB_MD_ID);

	/* Request data to be read */
	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
	efx_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to become available */
	rc = falcon_gmii_wait(efx);
	if (rc == 0) {
		efx_reado(efx, &reg, FR_AB_MD_RXD);
		rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
		netif_vdbg(efx, hw, efx->net_dev,
			   "read from MDIO %d register %d.%d, got %04x\n",
			   prtad, devad, addr, rc);
	} else {
		/* Abort the read operation */
		EFX_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_RIC, 0,
				     FRF_AB_MD_GC, 1);
		efx_writeo(efx, &reg, FR_AB_MD_CS);

		netif_dbg(efx, hw, efx->net_dev,
			  "read from MDIO %d register %d.%d, got error %d\n",
			  prtad, devad, addr, rc);
	}

out:
	mutex_unlock(&efx->mdio_lock);
	return rc;
}

838 839 840 841 842 843 844 845
static void falcon_clock_mac(struct efx_nic *efx)
{
	unsigned strap_val;
	efx_oword_t nic_stat;

	/* Configure the NIC generated MAC clock correctly */
	efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
	strap_val = EFX_IS10G(efx) ? 5 : 3;
846
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
847 848 849 850 851 852 853 854 855 856 857
		EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP_EN, 1);
		EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP, strap_val);
		efx_writeo(efx, &nic_stat, FR_AB_NIC_STAT);
	} else {
		/* Falcon A1 does not support 1G/10G speed switching
		 * and must not be used with a PHY that does. */
		BUG_ON(EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_PINS) !=
		       strap_val);
	}
}

B
Ben Hutchings 已提交
858
static void falcon_switch_mac(struct efx_nic *efx)
859 860
{
	struct efx_mac_operations *old_mac_op = efx->mac_op;
861 862
	struct falcon_nic_data *nic_data = efx->nic_data;
	unsigned int stats_done_offset;
863

864
	WARN_ON(!mutex_is_locked(&efx->mac_lock));
B
Ben Hutchings 已提交
865 866
	WARN_ON(nic_data->stats_disable_count == 0);

867 868 869
	efx->mac_op = (EFX_IS10G(efx) ?
		       &falcon_xmac_operations : &falcon_gmac_operations);

870 871 872 873 874 875
	if (EFX_IS10G(efx))
		stats_done_offset = XgDmaDone_offset;
	else
		stats_done_offset = GDmaDone_offset;
	nic_data->stats_dma_done = efx->stats_buffer.addr + stats_done_offset;

876
	if (old_mac_op == efx->mac_op)
B
Ben Hutchings 已提交
877
		return;
878

879 880
	falcon_clock_mac(efx);

881 882
	netif_dbg(efx, hw, efx->net_dev, "selected %cMAC\n",
		  EFX_IS10G(efx) ? 'X' : 'G');
883
	/* Not all macs support a mac-level link state */
B
Ben Hutchings 已提交
884
	efx->xmac_poll_required = false;
B
Ben Hutchings 已提交
885
	falcon_reset_macs(efx);
886 887
}

888
/* This call is responsible for hooking in the MAC and PHY operations */
889
static int falcon_probe_port(struct efx_nic *efx)
890 891 892
{
	int rc;

893 894 895 896 897 898 899 900 901 902
	switch (efx->phy_type) {
	case PHY_TYPE_SFX7101:
		efx->phy_op = &falcon_sfx7101_phy_ops;
		break;
	case PHY_TYPE_SFT9001A:
	case PHY_TYPE_SFT9001B:
		efx->phy_op = &falcon_sft9001_phy_ops;
		break;
	case PHY_TYPE_QT2022C2:
	case PHY_TYPE_QT2025C:
903
		efx->phy_op = &falcon_qt202x_phy_ops;
904 905
		break;
	default:
906 907
		netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n",
			  efx->phy_type);
908 909 910
		return -ENODEV;
	}

911
	/* Fill out MDIO structure and loopback modes */
912 913
	efx->mdio.mdio_read = falcon_mdio_read;
	efx->mdio.mdio_write = falcon_mdio_write;
914 915 916
	rc = efx->phy_op->probe(efx);
	if (rc != 0)
		return rc;
917

918 919 920 921
	/* Initial assumption */
	efx->link_state.speed = 10000;
	efx->link_state.fd = true;

922
	/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
923
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
B
Ben Hutchings 已提交
924
		efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
925
	else
B
Ben Hutchings 已提交
926
		efx->wanted_fc = EFX_FC_RX;
927 928
	if (efx->mdio.mmds & MDIO_DEVS_AN)
		efx->wanted_fc |= EFX_FC_AUTO;
929 930

	/* Allocate buffer for stats */
931 932
	rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
				  FALCON_MAC_STATS_SIZE);
933 934
	if (rc)
		return rc;
935 936 937 938 939
	netif_dbg(efx, probe, efx->net_dev,
		  "stats buffer at %llx (virt %p phys %llx)\n",
		  (u64)efx->stats_buffer.dma_addr,
		  efx->stats_buffer.addr,
		  (u64)virt_to_phys(efx->stats_buffer.addr));
940 941 942 943

	return 0;
}

944
static void falcon_remove_port(struct efx_nic *efx)
945
{
946
	efx->phy_op->remove(efx);
947
	efx_nic_free_buffer(efx, &efx->stats_buffer);
948 949
}

B
Ben Hutchings 已提交
950 951 952 953 954 955
/**************************************************************************
 *
 * Falcon test code
 *
 **************************************************************************/

956 957
static int
falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
B
Ben Hutchings 已提交
958 959 960 961 962 963 964 965
{
	struct falcon_nvconfig *nvconfig;
	struct efx_spi_device *spi;
	void *region;
	int rc, magic_num, struct_ver;
	__le16 *word, *limit;
	u32 csum;

966 967 968 969
	spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom;
	if (!spi)
		return -EINVAL;

970
	region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
B
Ben Hutchings 已提交
971 972
	if (!region)
		return -ENOMEM;
973
	nvconfig = region + FALCON_NVCONFIG_OFFSET;
B
Ben Hutchings 已提交
974

975
	mutex_lock(&efx->spi_lock);
976
	rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
977
	mutex_unlock(&efx->spi_lock);
B
Ben Hutchings 已提交
978
	if (rc) {
979 980
		netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
			  efx->spi_flash ? "flash" : "EEPROM");
B
Ben Hutchings 已提交
981 982 983 984 985 986 987 988
		rc = -EIO;
		goto out;
	}

	magic_num = le16_to_cpu(nvconfig->board_magic_num);
	struct_ver = le16_to_cpu(nvconfig->board_struct_ver);

	rc = -EINVAL;
989
	if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
990 991
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM bad magic 0x%x\n", magic_num);
B
Ben Hutchings 已提交
992 993 994
		goto out;
	}
	if (struct_ver < 2) {
995 996
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM has ancient version 0x%x\n", struct_ver);
B
Ben Hutchings 已提交
997 998 999 1000 1001 1002
		goto out;
	} else if (struct_ver < 4) {
		word = &nvconfig->board_magic_num;
		limit = (__le16 *) (nvconfig + 1);
	} else {
		word = region;
1003
		limit = region + FALCON_NVCONFIG_END;
B
Ben Hutchings 已提交
1004 1005 1006 1007 1008
	}
	for (csum = 0; word < limit; ++word)
		csum += le16_to_cpu(*word);

	if (~csum & 0xffff) {
1009 1010
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM has incorrect checksum\n");
B
Ben Hutchings 已提交
1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022
		goto out;
	}

	rc = 0;
	if (nvconfig_out)
		memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));

 out:
	kfree(region);
	return rc;
}

1023 1024 1025 1026 1027
static int falcon_test_nvram(struct efx_nic *efx)
{
	return falcon_read_nvram(efx, NULL);
}

1028
static const struct efx_nic_register_test falcon_b0_register_tests[] = {
1029
	{ FR_AZ_ADR_REGION,
1030
	  EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
1031
	{ FR_AZ_RX_CFG,
B
Ben Hutchings 已提交
1032
	  EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
1033
	{ FR_AZ_TX_CFG,
B
Ben Hutchings 已提交
1034
	  EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
1035
	{ FR_AZ_TX_RESERVED,
B
Ben Hutchings 已提交
1036
	  EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
1037
	{ FR_AB_MAC_CTRL,
B
Ben Hutchings 已提交
1038
	  EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
1039
	{ FR_AZ_SRM_TX_DC_CFG,
B
Ben Hutchings 已提交
1040
	  EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
1041
	{ FR_AZ_RX_DC_CFG,
B
Ben Hutchings 已提交
1042
	  EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
1043
	{ FR_AZ_RX_DC_PF_WM,
B
Ben Hutchings 已提交
1044
	  EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
1045
	{ FR_BZ_DP_CTRL,
B
Ben Hutchings 已提交
1046
	  EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
1047
	{ FR_AB_GM_CFG2,
1048
	  EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
1049
	{ FR_AB_GMF_CFG0,
1050
	  EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
1051
	{ FR_AB_XM_GLB_CFG,
B
Ben Hutchings 已提交
1052
	  EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
1053
	{ FR_AB_XM_TX_CFG,
B
Ben Hutchings 已提交
1054
	  EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
1055
	{ FR_AB_XM_RX_CFG,
B
Ben Hutchings 已提交
1056
	  EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
1057
	{ FR_AB_XM_RX_PARAM,
B
Ben Hutchings 已提交
1058
	  EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
1059
	{ FR_AB_XM_FC,
B
Ben Hutchings 已提交
1060
	  EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
1061
	{ FR_AB_XM_ADR_LO,
B
Ben Hutchings 已提交
1062
	  EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
1063
	{ FR_AB_XX_SD_CTL,
B
Ben Hutchings 已提交
1064 1065 1066
	  EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
};

1067 1068 1069 1070 1071 1072
static int falcon_b0_test_registers(struct efx_nic *efx)
{
	return efx_nic_test_registers(efx, falcon_b0_register_tests,
				      ARRAY_SIZE(falcon_b0_register_tests));
}

/**************************************************************************
 *
 * Device reset
 *
 **************************************************************************
 */

/* Resets NIC to known state.  This routine must be called in process
 * context and is allowed to sleep. */
1082
static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
1083 1084 1085 1086 1087
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t glb_ctl_reg_ker;
	int rc;

1088 1089
	netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n",
		  RESET_TYPE(method));
1090 1091 1092 1093 1094

	/* Initiate device reset */
	if (method == RESET_TYPE_WORLD) {
		rc = pci_save_state(efx->pci_dev);
		if (rc) {
1095 1096 1097
			netif_err(efx, drv, efx->net_dev,
				  "failed to backup PCI state of primary "
				  "function prior to hardware reset\n");
1098 1099
			goto fail1;
		}
1100
		if (efx_nic_is_dual_func(efx)) {
1101 1102
			rc = pci_save_state(nic_data->pci_dev2);
			if (rc) {
1103 1104 1105 1106
				netif_err(efx, drv, efx->net_dev,
					  "failed to backup PCI state of "
					  "secondary function prior to "
					  "hardware reset\n");
1107 1108 1109 1110 1111
				goto fail2;
			}
		}

		EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
1112 1113 1114
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
1115 1116
	} else {
		EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128
				     /* exclude PHY from "invisible" reset */
				     FRF_AB_EXT_PHY_RST_CTL,
				     method == RESET_TYPE_INVISIBLE,
				     /* exclude EEPROM/flash and PCIe */
				     FRF_AB_PCIE_CORE_RST_CTL, 1,
				     FRF_AB_PCIE_NSTKY_RST_CTL, 1,
				     FRF_AB_PCIE_SD_RST_CTL, 1,
				     FRF_AB_EE_RST_CTL, 1,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	}
1129
	efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
1130

1131
	netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n");
1132 1133 1134 1135
	schedule_timeout_uninterruptible(HZ / 20);

	/* Restore PCI configuration if needed */
	if (method == RESET_TYPE_WORLD) {
1136
		if (efx_nic_is_dual_func(efx)) {
1137 1138
			rc = pci_restore_state(nic_data->pci_dev2);
			if (rc) {
1139 1140 1141
				netif_err(efx, drv, efx->net_dev,
					  "failed to restore PCI config for "
					  "the secondary function\n");
1142 1143 1144 1145 1146
				goto fail3;
			}
		}
		rc = pci_restore_state(efx->pci_dev);
		if (rc) {
1147 1148 1149
			netif_err(efx, drv, efx->net_dev,
				  "failed to restore PCI config for the "
				  "primary function\n");
1150 1151
			goto fail4;
		}
1152 1153
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully restored PCI config\n");
1154 1155 1156
	}

	/* Assert that reset complete */
1157
	efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
1158
	if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
1159
		rc = -ETIMEDOUT;
1160 1161
		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for hardware reset\n");
1162 1163
		goto fail5;
	}
1164
	netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");
1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177

	return 0;

	/* pci_save_state() and pci_restore_state() MUST be called in pairs */
fail2:
fail3:
	pci_restore_state(efx->pci_dev);
fail1:
fail4:
fail5:
	return rc;
}

1178
static void falcon_monitor(struct efx_nic *efx)
1179
{
S
Steve Hodgson 已提交
1180
	bool link_changed;
1181 1182
	int rc;

S
Steve Hodgson 已提交
1183 1184
	BUG_ON(!mutex_is_locked(&efx->mac_lock));

1185 1186
	rc = falcon_board(efx)->type->monitor(efx);
	if (rc) {
1187 1188 1189
		netif_err(efx, hw, efx->net_dev,
			  "Board sensor %s; shutting down PHY\n",
			  (rc == -ERANGE) ? "reported fault" : "failed");
1190
		efx->phy_mode |= PHY_MODE_LOW_POWER;
B
Ben Hutchings 已提交
1191 1192
		rc = __efx_reconfigure_port(efx);
		WARN_ON(rc);
1193
	}
S
Steve Hodgson 已提交
1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204

	if (LOOPBACK_INTERNAL(efx))
		link_changed = falcon_loopback_link_poll(efx);
	else
		link_changed = efx->phy_op->poll(efx);

	if (link_changed) {
		falcon_stop_nic_stats(efx);
		falcon_deconfigure_mac_wrapper(efx);

		falcon_switch_mac(efx);
B
Ben Hutchings 已提交
1205 1206
		rc = efx->mac_op->reconfigure(efx);
		BUG_ON(rc);
S
Steve Hodgson 已提交
1207 1208 1209 1210 1211 1212

		falcon_start_nic_stats(efx);

		efx_link_status_changed(efx);
	}

B
Ben Hutchings 已提交
1213 1214
	if (EFX_IS10G(efx))
		falcon_poll_xmac(efx);
1215 1216
}

1217 1218 1219 1220 1221 1222 1223 1224 1225
/* Zeroes out the SRAM contents.  This routine must be called in
 * process context and is allowed to sleep.
 */
static int falcon_reset_sram(struct efx_nic *efx)
{
	efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
	int count;

	/* Set the SRAM wake/sleep GPIO appropriately. */
1226
	efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
1227 1228
	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
1229
	efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
1230 1231 1232

	/* Initiate SRAM reset */
	EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
1233 1234
			     FRF_AZ_SRM_INIT_EN, 1,
			     FRF_AZ_SRM_NB_SZ, 0);
1235
	efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
1236 1237 1238 1239

	/* Wait for SRAM reset to complete */
	count = 0;
	do {
1240 1241
		netif_dbg(efx, hw, efx->net_dev,
			  "waiting for SRAM reset (attempt %d)...\n", count);
1242 1243 1244 1245 1246

		/* SRAM reset is slow; expect around 16ms */
		schedule_timeout_uninterruptible(HZ / 50);

		/* Check for reset complete */
1247
		efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
1248
		if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
1249 1250
			netif_dbg(efx, hw, efx->net_dev,
				  "SRAM reset complete\n");
1251 1252 1253 1254 1255

			return 0;
		}
	} while (++count < 20);	/* wait upto 0.4 sec */

1256
	netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n");
1257 1258 1259
	return -ETIMEDOUT;
}

1260 1261 1262 1263 1264 1265 1266
static int falcon_spi_device_init(struct efx_nic *efx,
				  struct efx_spi_device **spi_device_ret,
				  unsigned int device_id, u32 device_type)
{
	struct efx_spi_device *spi_device;

	if (device_type != 0) {
1267
		spi_device = kzalloc(sizeof(*spi_device), GFP_KERNEL);
1268 1269 1270 1271 1272 1273 1274 1275 1276
		if (!spi_device)
			return -ENOMEM;
		spi_device->device_id = device_id;
		spi_device->size =
			1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
		spi_device->addr_len =
			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
		spi_device->munge_address = (spi_device->size == 1 << 9 &&
					     spi_device->addr_len == 1);
1277 1278 1279 1280 1281
		spi_device->erase_command =
			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
		spi_device->erase_size =
			1 << SPI_DEV_TYPE_FIELD(device_type,
						SPI_DEV_TYPE_ERASE_SIZE);
1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301
		spi_device->block_size =
			1 << SPI_DEV_TYPE_FIELD(device_type,
						SPI_DEV_TYPE_BLOCK_SIZE);
	} else {
		spi_device = NULL;
	}

	kfree(*spi_device_ret);
	*spi_device_ret = spi_device;
	return 0;
}

/* Free both SPI device descriptions (kfree(NULL) is a no-op). */
static void falcon_remove_spi_devices(struct efx_nic *efx)
{
	kfree(efx->spi_eeprom);
	efx->spi_eeprom = NULL;
	kfree(efx->spi_flash);
	efx->spi_flash = NULL;
}

1302 1303 1304 1305
/* Extract non-volatile configuration */
static int falcon_probe_nvconfig(struct efx_nic *efx)
{
	struct falcon_nvconfig *nvconfig;
B
Ben Hutchings 已提交
1306
	int board_rev;
1307 1308 1309
	int rc;

	nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
1310 1311
	if (!nvconfig)
		return -ENOMEM;
1312

B
Ben Hutchings 已提交
1313 1314
	rc = falcon_read_nvram(efx, nvconfig);
	if (rc == -EINVAL) {
1315 1316
		netif_err(efx, probe, efx->net_dev,
			  "NVRAM is invalid therefore using defaults\n");
1317
		efx->phy_type = PHY_TYPE_NONE;
1318
		efx->mdio.prtad = MDIO_PRTAD_NONE;
1319
		board_rev = 0;
B
Ben Hutchings 已提交
1320 1321 1322
		rc = 0;
	} else if (rc) {
		goto fail1;
1323 1324
	} else {
		struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2;
1325
		struct falcon_nvconfig_board_v3 *v3 = &nvconfig->board_v3;
1326 1327

		efx->phy_type = v2->port0_phy_type;
1328
		efx->mdio.prtad = v2->port0_phy_addr;
1329
		board_rev = le16_to_cpu(v2->board_revision);
1330

B
Ben Hutchings 已提交
1331
		if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
1332 1333 1334 1335
			rc = falcon_spi_device_init(
				efx, &efx->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
				le32_to_cpu(v3->spi_device_type
					    [FFE_AB_SPI_DEVICE_FLASH]));
1336 1337
			if (rc)
				goto fail2;
1338 1339 1340 1341
			rc = falcon_spi_device_init(
				efx, &efx->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
				le32_to_cpu(v3->spi_device_type
					    [FFE_AB_SPI_DEVICE_EEPROM]));
1342 1343 1344
			if (rc)
				goto fail2;
		}
1345 1346
	}

B
Ben Hutchings 已提交
1347 1348 1349
	/* Read the MAC addresses */
	memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);

1350 1351
	netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
		  efx->phy_type, efx->mdio.prtad);
1352

1353 1354 1355
	rc = falcon_probe_board(efx, board_rev);
	if (rc)
		goto fail2;
1356

1357 1358 1359 1360 1361 1362
	kfree(nvconfig);
	return 0;

 fail2:
	falcon_remove_spi_devices(efx);
 fail1:
1363 1364 1365 1366
	kfree(nvconfig);
	return rc;
}

1367 1368 1369 1370
/* Probe all SPI devices on the NIC */
static void falcon_probe_spi_devices(struct efx_nic *efx)
{
	efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
1371
	int boot_dev;
1372

1373 1374 1375
	efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
	efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
	efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
1376

1377 1378 1379
	if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
		boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
			    FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
1380 1381 1382
		netif_dbg(efx, probe, efx->net_dev, "Booted from %s\n",
			  boot_dev == FFE_AB_SPI_DEVICE_FLASH ?
			  "flash" : "EEPROM");
1383 1384 1385 1386
	} else {
		/* Disable VPD and set clock dividers to safe
		 * values for initial programming. */
		boot_dev = -1;
1387 1388 1389
		netif_dbg(efx, probe, efx->net_dev,
			  "Booted from internal ASIC settings;"
			  " setting SPI config\n");
1390
		EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
1391
				     /* 125 MHz / 7 ~= 20 MHz */
1392
				     FRF_AB_EE_SF_CLOCK_DIV, 7,
1393
				     /* 125 MHz / 63 ~= 2 MHz */
1394
				     FRF_AB_EE_EE_CLOCK_DIV, 63);
1395
		efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
1396 1397
	}

1398 1399 1400
	if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
		falcon_spi_device_init(efx, &efx->spi_flash,
				       FFE_AB_SPI_DEVICE_FLASH,
1401
				       default_flash_type);
1402 1403 1404
	if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
		falcon_spi_device_init(efx, &efx->spi_eeprom,
				       FFE_AB_SPI_DEVICE_EEPROM,
1405
				       large_eeprom_type);
1406 1407
}

1408
static int falcon_probe_nic(struct efx_nic *efx)
1409 1410
{
	struct falcon_nic_data *nic_data;
1411
	struct falcon_board *board;
1412 1413 1414 1415
	int rc;

	/* Allocate storage for hardware specific data */
	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
1416 1417
	if (!nic_data)
		return -ENOMEM;
1418
	efx->nic_data = nic_data;
1419

1420 1421 1422
	rc = -ENODEV;

	if (efx_nic_fpga_ver(efx) != 0) {
1423 1424
		netif_err(efx, probe, efx->net_dev,
			  "Falcon FPGA not supported\n");
1425
		goto fail1;
1426 1427 1428 1429 1430 1431
	}

	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
		efx_oword_t nic_stat;
		struct pci_dev *dev;
		u8 pci_rev = efx->pci_dev->revision;
1432

1433
		if ((pci_rev == 0xff) || (pci_rev == 0)) {
1434 1435
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A0 not supported\n");
1436 1437 1438 1439
			goto fail1;
		}
		efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
		if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
1440 1441
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A1 1G not supported\n");
1442 1443 1444
			goto fail1;
		}
		if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
1445 1446
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A1 PCI-X not supported\n");
1447 1448
			goto fail1;
		}
1449

1450
		dev = pci_dev_get(efx->pci_dev);
1451 1452 1453 1454 1455 1456 1457 1458 1459
		while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID,
					     dev))) {
			if (dev->bus == efx->pci_dev->bus &&
			    dev->devfn == efx->pci_dev->devfn + 1) {
				nic_data->pci_dev2 = dev;
				break;
			}
		}
		if (!nic_data->pci_dev2) {
1460 1461
			netif_err(efx, probe, efx->net_dev,
				  "failed to find secondary function\n");
1462 1463 1464 1465 1466 1467 1468 1469
			rc = -ENODEV;
			goto fail2;
		}
	}

	/* Now we can reset the NIC */
	rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
	if (rc) {
1470
		netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
1471 1472 1473 1474
		goto fail3;
	}

	/* Allocate memory for INT_KER */
1475
	rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
1476 1477 1478 1479
	if (rc)
		goto fail4;
	BUG_ON(efx->irq_status.dma_addr & 0x0f);

1480 1481 1482 1483 1484
	netif_dbg(efx, probe, efx->net_dev,
		  "INT_KER at %llx (virt %p phys %llx)\n",
		  (u64)efx->irq_status.dma_addr,
		  efx->irq_status.addr,
		  (u64)virt_to_phys(efx->irq_status.addr));
1485

1486 1487
	falcon_probe_spi_devices(efx);

1488 1489 1490 1491 1492
	/* Read in the non-volatile configuration */
	rc = falcon_probe_nvconfig(efx);
	if (rc)
		goto fail5;

1493
	/* Initialise I2C adapter */
1494 1495 1496 1497 1498 1499 1500 1501 1502
	board = falcon_board(efx);
	board->i2c_adap.owner = THIS_MODULE;
	board->i2c_data = falcon_i2c_bit_operations;
	board->i2c_data.data = efx;
	board->i2c_adap.algo_data = &board->i2c_data;
	board->i2c_adap.dev.parent = &efx->pci_dev->dev;
	strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
		sizeof(board->i2c_adap.name));
	rc = i2c_bit_add_bus(&board->i2c_adap);
1503 1504 1505
	if (rc)
		goto fail5;

1506
	rc = falcon_board(efx)->type->init(efx);
1507
	if (rc) {
1508 1509
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise board\n");
1510 1511 1512
		goto fail6;
	}

1513 1514 1515 1516
	nic_data->stats_disable_count = 1;
	setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func,
		    (unsigned long)efx);

1517 1518
	return 0;

1519
 fail6:
1520 1521
	BUG_ON(i2c_del_adapter(&board->i2c_adap));
	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
1522
 fail5:
1523
	falcon_remove_spi_devices(efx);
1524
	efx_nic_free_buffer(efx, &efx->irq_status);
1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536
 fail4:
 fail3:
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}
 fail2:
 fail1:
	kfree(efx->nic_data);
	return rc;
}

1537 1538 1539 1540 1541 1542 1543 1544 1545 1546
static void falcon_init_rx_cfg(struct efx_nic *efx)
{
	/* Prior to Siena the RX DMA engine will split each frame at
	 * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to
	 * be so large that that never happens. */
	const unsigned huge_buf_size = (3 * 4096) >> 5;
	/* RX control FIFO thresholds (32 entries) */
	const unsigned ctrl_xon_thr = 20;
	const unsigned ctrl_xoff_thr = 25;
	/* RX data FIFO thresholds (256-byte units; size varies) */
1547 1548
	int data_xon_thr = efx_nic_rx_xon_thresh >> 8;
	int data_xoff_thr = efx_nic_rx_xoff_thresh >> 8;
1549 1550
	efx_oword_t reg;

1551
	efx_reado(efx, &reg, FR_AZ_RX_CFG);
1552
	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
1553 1554 1555 1556 1557
		/* Data FIFO size is 5.5K */
		if (data_xon_thr < 0)
			data_xon_thr = 512 >> 8;
		if (data_xoff_thr < 0)
			data_xoff_thr = 2048 >> 8;
1558 1559 1560 1561 1562 1563 1564
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
				    huge_buf_size);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, data_xon_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, data_xoff_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
1565
	} else {
1566 1567 1568 1569 1570
		/* Data FIFO size is 80K; register fields moved */
		if (data_xon_thr < 0)
			data_xon_thr = 27648 >> 8; /* ~3*max MTU */
		if (data_xoff_thr < 0)
			data_xoff_thr = 54272 >> 8; /* ~80Kb - 3*max MTU */
1571 1572 1573 1574 1575 1576 1577 1578
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
				    huge_buf_size);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, data_xon_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, data_xoff_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
1579 1580 1581 1582 1583 1584 1585

		/* Enable hash insertion. This is broken for the
		 * 'Falcon' hash so also select Toeplitz TCP/IPv4 and
		 * IPv4 hashes. */
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_INSRT_HDR, 1);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_ALG, 1);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_IP_HASH, 1);
1586
	}
1587 1588 1589
	/* Always enable XOFF signal from RX FIFO.  We enable
	 * or disable transmission of pause frames at the MAC. */
	EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
1590
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
1591 1592
}

1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629
/* This call performs hardware-specific global initialisation, such as
 * defining the descriptor cache sizes and number of RSS channels.
 * It does not set up any buffers, descriptor rings or event queues.
 */
static int falcon_init_nic(struct efx_nic *efx)
{
	efx_oword_t temp;
	int rc;

	/* Use on-chip SRAM */
	efx_reado(efx, &temp, FR_AB_NIC_STAT);
	EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
	efx_writeo(efx, &temp, FR_AB_NIC_STAT);

	/* Set the source of the GMAC clock */
	if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
		efx_reado(efx, &temp, FR_AB_GPIO_CTL);
		EFX_SET_OWORD_FIELD(temp, FRF_AB_USE_NIC_CLK, true);
		efx_writeo(efx, &temp, FR_AB_GPIO_CTL);
	}

	/* Select the correct MAC */
	falcon_clock_mac(efx);

	rc = falcon_reset_sram(efx);
	if (rc)
		return rc;

	/* Clear the parity enables on the TX data fifos as
	 * they produce false parity errors because of timing issues
	 */
	if (EFX_WORKAROUND_5129(efx)) {
		efx_reado(efx, &temp, FR_AZ_CSR_SPARE);
		EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
		efx_writeo(efx, &temp, FR_AZ_CSR_SPARE);
	}

1630
	if (EFX_WORKAROUND_7244(efx)) {
1631
		efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
1632 1633 1634 1635
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
1636
		efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
1637 1638
	}

1639
	/* XXX This is documented only for Falcon A0/A1 */
1640 1641 1642
	/* Setup RX.  Wait for descriptor is broken and must
	 * be disabled.  RXDP recovery shouldn't be needed, but is.
	 */
1643
	efx_reado(efx, &temp, FR_AA_RX_SELF_RST);
1644 1645
	EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
1646
	if (EFX_WORKAROUND_5583(efx))
1647
		EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
1648
	efx_writeo(efx, &temp, FR_AA_RX_SELF_RST);
1649 1650 1651 1652

	/* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
	 * descriptors (which is bad).
	 */
1653
	efx_reado(efx, &temp, FR_AZ_TX_CFG);
1654
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
1655
	efx_writeo(efx, &temp, FR_AZ_TX_CFG);
1656

1657
	falcon_init_rx_cfg(efx);
1658

1659
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1660 1661 1662 1663 1664
		/* Set hash key for IPv4 */
		memcpy(&temp, efx->rx_hash_key, sizeof(temp));
		efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);

		/* Set destination of both TX and RX Flush events */
1665
		EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
1666
		efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
1667 1668
	}

1669 1670
	efx_nic_init_common(efx);

1671 1672 1673
	return 0;
}

1674
static void falcon_remove_nic(struct efx_nic *efx)
1675 1676
{
	struct falcon_nic_data *nic_data = efx->nic_data;
1677
	struct falcon_board *board = falcon_board(efx);
1678 1679
	int rc;

1680
	board->type->fini(efx);
1681

1682
	/* Remove I2C adapter and clear it in preparation for a retry */
1683
	rc = i2c_del_adapter(&board->i2c_adap);
1684
	BUG_ON(rc);
1685
	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
1686

1687
	falcon_remove_spi_devices(efx);
1688
	efx_nic_free_buffer(efx, &efx->irq_status);
1689

B
Ben Hutchings 已提交
1690
	falcon_reset_hw(efx, RESET_TYPE_ALL);
1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702

	/* Release the second function after the reset */
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}

	/* Tear down the private nic state */
	kfree(efx->nic_data);
	efx->nic_data = NULL;
}

1703
static void falcon_update_nic_stats(struct efx_nic *efx)
1704
{
1705
	struct falcon_nic_data *nic_data = efx->nic_data;
1706 1707
	efx_oword_t cnt;

1708 1709 1710
	if (nic_data->stats_disable_count)
		return;

1711
	efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
1712 1713
	efx->n_rx_nodesc_drop_cnt +=
		EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756

	if (nic_data->stats_pending &&
	    *nic_data->stats_dma_done == FALCON_STATS_DONE) {
		nic_data->stats_pending = false;
		rmb(); /* read the done flag before the stats */
		efx->mac_op->update_stats(efx);
	}
}

/* Decrement the stats-disable count, kicking off a new statistics DMA
 * request when it reaches zero.  Pairs with falcon_stop_nic_stats(). */
void falcon_start_nic_stats(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	spin_lock_bh(&efx->stats_lock);
	if (--nic_data->stats_disable_count == 0)
		falcon_stats_request(efx);
	spin_unlock_bh(&efx->stats_lock);
}

/* Disable statistics DMA (counted, so calls may nest) and wait for any
 * in-flight transfer to finish.  May sleep. */
void falcon_stop_nic_stats(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int i;

	might_sleep();

	spin_lock_bh(&efx->stats_lock);
	++nic_data->stats_disable_count;
	spin_unlock_bh(&efx->stats_lock);

	del_timer_sync(&nic_data->stats_timer);

	/* Wait enough time for the most recent transfer to
	 * complete. */
	for (i = 0; i < 4 && nic_data->stats_pending; i++) {
		if (*nic_data->stats_dma_done == FALCON_STATS_DONE)
			break;
		msleep(1);
	}

	spin_lock_bh(&efx->stats_lock);
	falcon_stats_complete(efx);
	spin_unlock_bh(&efx->stats_lock);
}

1759 1760 1761 1762 1763
static void falcon_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
	falcon_board(efx)->type->set_id_led(efx, mode);
}

1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784
/**************************************************************************
 *
 * Wake on LAN
 *
 **************************************************************************
 */

static void falcon_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

/* Reject any request to enable Wake-on-LAN (unsupported hardware). */
static int falcon_set_wol(struct efx_nic *efx, u32 type)
{
	if (type != 0)
		return -EINVAL;
	return 0;
}

/**************************************************************************
 *
 * Revision-dependent attributes used by efx.c and nic.c
 *
 **************************************************************************
 */

1792
struct efx_nic_type falcon_a1_nic_type = {
1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804
	.probe = falcon_probe_nic,
	.remove = falcon_remove_nic,
	.init = falcon_init_nic,
	.fini = efx_port_dummy_op_void,
	.monitor = falcon_monitor,
	.reset = falcon_reset_hw,
	.probe_port = falcon_probe_port,
	.remove_port = falcon_remove_port,
	.prepare_flush = falcon_prepare_flush,
	.update_stats = falcon_update_nic_stats,
	.start_stats = falcon_start_nic_stats,
	.stop_stats = falcon_stop_nic_stats,
1805
	.set_id_led = falcon_set_id_led,
1806 1807
	.push_irq_moderation = falcon_push_irq_moderation,
	.push_multicast_hash = falcon_push_multicast_hash,
B
Ben Hutchings 已提交
1808
	.reconfigure_port = falcon_reconfigure_port,
1809 1810 1811
	.get_wol = falcon_get_wol,
	.set_wol = falcon_set_wol,
	.resume_wol = efx_port_dummy_op_void,
1812
	.test_nvram = falcon_test_nvram,
1813 1814
	.default_mac_ops = &falcon_xmac_operations,

1815
	.revision = EFX_REV_FALCON_A1,
1816
	.mem_map_size = 0x20000,
1817 1818 1819 1820 1821
	.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
	.rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
	.buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
	.evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
	.evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
1822
	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
1823 1824 1825
	.rx_buffer_padding = 0x24,
	.max_interrupt_mode = EFX_INT_MODE_MSI,
	.phys_addr_channels = 4,
1826 1827
	.tx_dc_base = 0x130000,
	.rx_dc_base = 0x100000,
1828
	.offload_features = NETIF_F_IP_CSUM,
1829
	.reset_world_flags = ETH_RESET_IRQ,
1830 1831
};

1832
struct efx_nic_type falcon_b0_nic_type = {
1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844
	.probe = falcon_probe_nic,
	.remove = falcon_remove_nic,
	.init = falcon_init_nic,
	.fini = efx_port_dummy_op_void,
	.monitor = falcon_monitor,
	.reset = falcon_reset_hw,
	.probe_port = falcon_probe_port,
	.remove_port = falcon_remove_port,
	.prepare_flush = falcon_prepare_flush,
	.update_stats = falcon_update_nic_stats,
	.start_stats = falcon_start_nic_stats,
	.stop_stats = falcon_stop_nic_stats,
1845
	.set_id_led = falcon_set_id_led,
1846 1847
	.push_irq_moderation = falcon_push_irq_moderation,
	.push_multicast_hash = falcon_push_multicast_hash,
B
Ben Hutchings 已提交
1848
	.reconfigure_port = falcon_reconfigure_port,
1849 1850 1851
	.get_wol = falcon_get_wol,
	.set_wol = falcon_set_wol,
	.resume_wol = efx_port_dummy_op_void,
1852
	.test_registers = falcon_b0_test_registers,
1853
	.test_nvram = falcon_test_nvram,
1854 1855
	.default_mac_ops = &falcon_xmac_operations,

1856
	.revision = EFX_REV_FALCON_B0,
1857 1858 1859
	/* Map everything up to and including the RSS indirection
	 * table.  Don't map MSI-X table, MSI-X PBA since Linux
	 * requires that they not be mapped.  */
1860 1861 1862 1863 1864 1865 1866 1867
	.mem_map_size = (FR_BZ_RX_INDIRECTION_TBL +
			 FR_BZ_RX_INDIRECTION_TBL_STEP *
			 FR_BZ_RX_INDIRECTION_TBL_ROWS),
	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
	.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
	.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
	.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
	.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
1868
	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
1869
	.rx_buffer_hash_size = 0x10,
1870 1871 1872 1873 1874
	.rx_buffer_padding = 0,
	.max_interrupt_mode = EFX_INT_MODE_MSIX,
	.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
				   * interrupt handler only supports 32
				   * channels */
1875 1876
	.tx_dc_base = 0x130000,
	.rx_dc_base = 0x100000,
1877
	.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
1878
	.reset_world_flags = ETH_RESET_IRQ,
1879 1880
};