/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2010 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/i2c.h>
#include <linux/mii.h>
#include <linux/slab.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "spi.h"
#include "nic.h"
#include "farch_regs.h"
#include "io.h"
#include "phy.h"
#include "workarounds.h"
#include "selftest.h"
#include "mdio_10g.h"

/* Hardware control for SFC4000 (aka Falcon). */

/**************************************************************************
 *
 * MAC stats DMA format
 *
 **************************************************************************
 */

#define FALCON_MAC_STATS_SIZE 0x100

#define XgRxOctets_offset 0x0
#define XgRxOctets_WIDTH 48
#define XgRxOctetsOK_offset 0x8
#define XgRxOctetsOK_WIDTH 48
#define XgRxPkts_offset 0x10
#define XgRxPkts_WIDTH 32
#define XgRxPktsOK_offset 0x14
#define XgRxPktsOK_WIDTH 32
#define XgRxBroadcastPkts_offset 0x18
#define XgRxBroadcastPkts_WIDTH 32
#define XgRxMulticastPkts_offset 0x1C
#define XgRxMulticastPkts_WIDTH 32
#define XgRxUnicastPkts_offset 0x20
#define XgRxUnicastPkts_WIDTH 32
#define XgRxUndersizePkts_offset 0x24
#define XgRxUndersizePkts_WIDTH 32
#define XgRxOversizePkts_offset 0x28
#define XgRxOversizePkts_WIDTH 32
#define XgRxJabberPkts_offset 0x2C
#define XgRxJabberPkts_WIDTH 32
#define XgRxUndersizeFCSerrorPkts_offset 0x30
#define XgRxUndersizeFCSerrorPkts_WIDTH 32
#define XgRxDropEvents_offset 0x34
#define XgRxDropEvents_WIDTH 32
#define XgRxFCSerrorPkts_offset 0x38
#define XgRxFCSerrorPkts_WIDTH 32
#define XgRxAlignError_offset 0x3C
#define XgRxAlignError_WIDTH 32
#define XgRxSymbolError_offset 0x40
#define XgRxSymbolError_WIDTH 32
#define XgRxInternalMACError_offset 0x44
#define XgRxInternalMACError_WIDTH 32
#define XgRxControlPkts_offset 0x48
#define XgRxControlPkts_WIDTH 32
#define XgRxPausePkts_offset 0x4C
#define XgRxPausePkts_WIDTH 32
#define XgRxPkts64Octets_offset 0x50
#define XgRxPkts64Octets_WIDTH 32
#define XgRxPkts65to127Octets_offset 0x54
#define XgRxPkts65to127Octets_WIDTH 32
#define XgRxPkts128to255Octets_offset 0x58
#define XgRxPkts128to255Octets_WIDTH 32
#define XgRxPkts256to511Octets_offset 0x5C
#define XgRxPkts256to511Octets_WIDTH 32
#define XgRxPkts512to1023Octets_offset 0x60
#define XgRxPkts512to1023Octets_WIDTH 32
#define XgRxPkts1024to15xxOctets_offset 0x64
#define XgRxPkts1024to15xxOctets_WIDTH 32
#define XgRxPkts15xxtoMaxOctets_offset 0x68
#define XgRxPkts15xxtoMaxOctets_WIDTH 32
#define XgRxLengthError_offset 0x6C
#define XgRxLengthError_WIDTH 32
#define XgTxPkts_offset 0x80
#define XgTxPkts_WIDTH 32
#define XgTxOctets_offset 0x88
#define XgTxOctets_WIDTH 48
#define XgTxMulticastPkts_offset 0x90
#define XgTxMulticastPkts_WIDTH 32
#define XgTxBroadcastPkts_offset 0x94
#define XgTxBroadcastPkts_WIDTH 32
#define XgTxUnicastPkts_offset 0x98
#define XgTxUnicastPkts_WIDTH 32
#define XgTxControlPkts_offset 0x9C
#define XgTxControlPkts_WIDTH 32
#define XgTxPausePkts_offset 0xA0
#define XgTxPausePkts_WIDTH 32
#define XgTxPkts64Octets_offset 0xA4
#define XgTxPkts64Octets_WIDTH 32
#define XgTxPkts65to127Octets_offset 0xA8
#define XgTxPkts65to127Octets_WIDTH 32
#define XgTxPkts128to255Octets_offset 0xAC
#define XgTxPkts128to255Octets_WIDTH 32
#define XgTxPkts256to511Octets_offset 0xB0
#define XgTxPkts256to511Octets_WIDTH 32
#define XgTxPkts512to1023Octets_offset 0xB4
#define XgTxPkts512to1023Octets_WIDTH 32
#define XgTxPkts1024to15xxOctets_offset 0xB8
#define XgTxPkts1024to15xxOctets_WIDTH 32
#define XgTxPkts1519toMaxOctets_offset 0xBC
#define XgTxPkts1519toMaxOctets_WIDTH 32
#define XgTxUndersizePkts_offset 0xC0
#define XgTxUndersizePkts_WIDTH 32
#define XgTxOversizePkts_offset 0xC4
#define XgTxOversizePkts_WIDTH 32
#define XgTxNonTcpUdpPkt_offset 0xC8
#define XgTxNonTcpUdpPkt_WIDTH 16
#define XgTxMacSrcErrPkt_offset 0xCC
#define XgTxMacSrcErrPkt_WIDTH 16
#define XgTxIpSrcErrPkt_offset 0xD0
#define XgTxIpSrcErrPkt_WIDTH 16
#define XgDmaDone_offset 0xD4
#define XgDmaDone_WIDTH 32

#define FALCON_STATS_NOT_DONE 0x00000000
#define FALCON_STATS_DONE 0xffffffff

#define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset)
#define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH)

/* Retrieve statistic from statistics block */
#define FALCON_STAT(efx, falcon_stat, efx_stat) do {		\
	if (FALCON_STAT_WIDTH(falcon_stat) == 16)		\
		(efx)->mac_stats.efx_stat += le16_to_cpu(	\
			*((__force __le16 *)				\
			  (efx->stats_buffer.addr +		\
			   FALCON_STAT_OFFSET(falcon_stat))));	\
	else if (FALCON_STAT_WIDTH(falcon_stat) == 32)		\
		(efx)->mac_stats.efx_stat += le32_to_cpu(	\
			*((__force __le32 *)				\
			  (efx->stats_buffer.addr +		\
			   FALCON_STAT_OFFSET(falcon_stat))));	\
	else							\
		(efx)->mac_stats.efx_stat += le64_to_cpu(	\
			*((__force __le64 *)				\
			  (efx->stats_buffer.addr +		\
			   FALCON_STAT_OFFSET(falcon_stat))));	\
	} while (0)

/**************************************************************************
 *
 * Non-volatile configuration
 *
 **************************************************************************
 */

/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
struct falcon_nvconfig_board_v2 {
	__le16 nports;			/* number of physical ports on the board */
	u8 port0_phy_addr;		/* MDIO address of port 0's PHY */
	u8 port0_phy_type;		/* PHY_TYPE_* identifier for port 0 */
	u8 port1_phy_addr;		/* MDIO address of port 1's PHY */
	u8 port1_phy_type;		/* PHY_TYPE_* identifier for port 1 */
	__le16 asic_sub_revision;
	__le16 board_revision;
} __packed;

/* Board configuration v3 extra information */
/* Board configuration v3 extra information */
struct falcon_nvconfig_board_v3 {
	/* One descriptor word per SPI device (flash, EEPROM); decoded
	 * with the SPI_DEV_TYPE_* field definitions below. */
	__le32 spi_device_type[2];
} __packed;

/* Bit numbers for spi_device_type */
#define SPI_DEV_TYPE_SIZE_LBN 0
#define SPI_DEV_TYPE_SIZE_WIDTH 5
#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
#define SPI_DEV_TYPE_FIELD(type, field)					\
	(((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))

#define FALCON_NVCONFIG_OFFSET 0x300

#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
/* Layout of the non-volatile configuration image, starting at
 * FALCON_NVCONFIG_OFFSET in the boot device.  Trailing comments give
 * the absolute offset of each field within that image. */
struct falcon_nvconfig {
	efx_oword_t ee_vpd_cfg_reg;			/* 0x300 */
	u8 mac_address[2][8];			/* 0x310 */
	efx_oword_t pcie_sd_ctl0123_reg;		/* 0x320 */
	efx_oword_t pcie_sd_ctl45_reg;			/* 0x330 */
	efx_oword_t pcie_pcs_ctl_stat_reg;		/* 0x340 */
	efx_oword_t hw_init_reg;			/* 0x350 */
	efx_oword_t nic_stat_reg;			/* 0x360 */
	efx_oword_t glb_ctl_reg;			/* 0x370 */
	efx_oword_t srm_cfg_reg;			/* 0x380 */
	efx_oword_t spare_reg;				/* 0x390 */
	/* Validity is indicated by board_magic_num matching
	 * FALCON_NVCONFIG_BOARD_MAGIC_NUM and a correct checksum. */
	__le16 board_magic_num;			/* 0x3A0 */
	__le16 board_struct_ver;
	__le16 board_checksum;
	struct falcon_nvconfig_board_v2 board_v2;
	efx_oword_t ee_base_page_reg;			/* 0x3B0 */
	struct falcon_nvconfig_board_v3 board_v3;	/* 0x3C0 */
} __packed;

/*************************************************************************/

static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);

224 225 226 227 228 229 230 231 232 233 234 235 236 237
/* Pre-built SPI device descriptor words (see the SPI_DEV_TYPE_* field
 * definitions above) used when the NVRAM does not supply its own. */
static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
 * 8 KB, 16-bit address, 32 B write block */
large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
		     | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		     | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
/* Default flash device: Atmel AT25F1024
 * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */
default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
		      | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		      | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
		      | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
		      | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));

238 239 240 241 242 243 244 245
/**************************************************************************
 *
 * I2C bus - this is a bit-bashing interface using GPIO pins
 * Note that it uses the output enables to tristate the outputs
 * SDA is the data pin and SCL is the clock
 *
 **************************************************************************
 */
246
static void falcon_setsda(void *data, int state)
247
{
248
	struct efx_nic *efx = (struct efx_nic *)data;
249 250
	efx_oword_t reg;

251
	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
252
	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
253
	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
254 255
}

256
static void falcon_setscl(void *data, int state)
257
{
258
	struct efx_nic *efx = (struct efx_nic *)data;
259 260
	efx_oword_t reg;

261
	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
262
	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
263
	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
264 265
}

266 267 268 269
static int falcon_getsda(void *data)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;
270

271 272 273
	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
}
274

275 276 277 278
static int falcon_getscl(void *data)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;
279

280 281
	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
282 283
}

284
static const struct i2c_algo_bit_data falcon_i2c_bit_operations = {
285 286 287 288 289 290 291 292 293
	.setsda		= falcon_setsda,
	.setscl		= falcon_setscl,
	.getsda		= falcon_getsda,
	.getscl		= falcon_getscl,
	.udelay		= 5,
	/* Wait up to 50 ms for slave to let us pull SCL high */
	.timeout	= DIV_ROUND_UP(HZ, 20),
};

294
static void falcon_push_irq_moderation(struct efx_channel *channel)
295 296 297 298 299 300 301
{
	efx_dword_t timer_cmd;
	struct efx_nic *efx = channel->efx;

	/* Set timer register */
	if (channel->irq_moderation) {
		EFX_POPULATE_DWORD_2(timer_cmd,
302 303 304
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_INT_HLDOFF,
				     FRF_AB_TC_TIMER_VAL,
305
				     channel->irq_moderation - 1);
306 307
	} else {
		EFX_POPULATE_DWORD_2(timer_cmd,
308 309 310
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_DIS,
				     FRF_AB_TC_TIMER_VAL, 0);
311
	}
312
	BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
313 314
	efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
			       channel->channel);
B
Ben Hutchings 已提交
315 316
}

static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx);
/* Prepare the datapath for queue flushes by detaching the MAC and
 * letting the FIFOs drain. */
static void falcon_prepare_flush(struct efx_nic *efx)
{
	falcon_deconfigure_mac_wrapper(efx);

	/* Wait for the tx and rx fifo's to get to the next packet boundary
	 * (~1ms without back-pressure), then to drain the remainder of the
	 * fifo's at data path speeds (negligible), with a healthy margin. */
	msleep(10);
}

329 330 331 332 333 334 335 336 337 338
/* Acknowledge a legacy interrupt from Falcon
 *
 * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
 *
 * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
 * BIU. Interrupt acknowledge is read sensitive so must write instead
 * (then read to ensure the BIU collector is flushed)
 *
 * NB most hardware supports MSI interrupts
 */
339
static inline void falcon_irq_ack_a1(struct efx_nic *efx)
340 341 342
{
	efx_dword_t reg;

343
	EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
344 345
	efx_writed(efx, &reg, FR_AA_INT_ACK_KER);
	efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
346 347 348
}


349
static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
350
{
351 352
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
353 354 355 356 357 358 359
	int syserr;
	int queues;

	/* Check to see if this is our interrupt.  If it isn't, we
	 * exit without having touched the hardware.
	 */
	if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
360 361 362
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d not for me\n", irq,
			   raw_smp_processor_id());
363 364 365
		return IRQ_NONE;
	}
	efx->last_irq_cpu = raw_smp_processor_id();
366 367 368
	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
369

B
Ben Hutchings 已提交
370 371 372
	if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
		return IRQ_HANDLED;

373 374 375
	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
376
		return efx_farch_fatal_interrupt(efx);
377

378 379 380
	/* Determine interrupting queues, clear interrupt status
	 * register and acknowledge the device interrupt.
	 */
381 382
	BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS);
	queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
383 384 385 386
	EFX_ZERO_OWORD(*int_ker);
	wmb(); /* Ensure the vector is cleared before interrupt ack */
	falcon_irq_ack_a1(efx);

387
	if (queues & 1)
388
		efx_schedule_channel_irq(efx_get_channel(efx, 0));
389
	if (queues & 2)
390
		efx_schedule_channel_irq(efx_get_channel(efx, 1));
391 392 393 394 395 396 397 398 399
	return IRQ_HANDLED;
}
/**************************************************************************
 *
 * EEPROM/flash
 *
 **************************************************************************
 */

#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)
static int falcon_spi_poll(struct efx_nic *efx)
{
	efx_oword_t reg;
405
	efx_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
406
	return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
407 408
}

409 410 411
/* Wait for SPI command completion */
static int falcon_spi_wait(struct efx_nic *efx)
{
412 413 414 415 416 417 418 419 420 421 422 423
	/* Most commands will finish quickly, so we start polling at
	 * very short intervals.  Sometimes the command may have to
	 * wait for VPD or expansion ROM access outside of our
	 * control, so we allow up to 100 ms. */
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
	int i;

	for (i = 0; i < 10; i++) {
		if (!falcon_spi_poll(efx))
			return 0;
		udelay(10);
	}
424

425
	for (;;) {
426
		if (!falcon_spi_poll(efx))
427
			return 0;
428
		if (time_after_eq(jiffies, timeout)) {
429 430
			netif_err(efx, hw, efx->net_dev,
				  "timed out waiting for SPI\n");
431 432
			return -ETIMEDOUT;
		}
433
		schedule_timeout_uninterruptible(1);
434
	}
435 436
}

437
/* Issue a single command to an SPI device via the host interface.
 * @command: SPI opcode
 * @address: device address, or negative for address-less commands
 * @in: data to write, or NULL
 * @out: buffer for data to read, or NULL
 * @len: data length, at most FALCON_SPI_MAX_LEN bytes
 * Returns 0 on success or a negative errno (-EINVAL, -EBUSY, -ETIMEDOUT).
 * Must not be called while another SPI command could be in progress.
 */
int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
		   unsigned int command, int address,
		   const void *in, void *out, size_t len)
{
	bool addressed = (address >= 0);
	bool reading = (out != NULL);
	efx_oword_t reg;
	int rc;

	/* Input validation */
	if (len > FALCON_SPI_MAX_LEN)
		return -EINVAL;

	/* Check that previous command is not still running */
	rc = falcon_spi_poll(efx);
	if (rc)
		return rc;

	/* Program address register, if we have an address */
	if (addressed) {
		EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
		efx_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
	}

	/* Program data register, if we have data */
	if (in != NULL) {
		memcpy(&reg, in, len);
		efx_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
	}

	/* Issue read/write command */
	EFX_POPULATE_OWORD_7(reg,
			     FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
			     FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
			     FRF_AB_EE_SPI_HCMD_DABCNT, len,
			     FRF_AB_EE_SPI_HCMD_READ, reading,
			     FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
			     FRF_AB_EE_SPI_HCMD_ADBCNT,
			     (addressed ? spi->addr_len : 0),
			     FRF_AB_EE_SPI_HCMD_ENC, command);
	efx_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);

	/* Wait for read/write to complete */
	rc = falcon_spi_wait(efx);
	if (rc)
		return rc;

	/* Read data */
	if (out != NULL) {
		efx_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
		memcpy(out, &reg, len);
	}

	return 0;
}

493 494
/* Largest write we may issue starting at @start: bounded by the
 * hardware transfer limit and by the device's write-block boundary
 * (writes must not cross a block boundary). */
static size_t
falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
{
	return min(FALCON_SPI_MAX_LEN,
		   (spi->block_size - (start & (spi->block_size - 1))));
}

/* Fold high address bits into the command byte for small SPI devices
 * that carry the upper address bits there (selected by the device's
 * munge_address mask). */
static inline u8
efx_spi_munge_command(const struct efx_spi_device *spi,
		      const u8 command, const unsigned int address)
{
	unsigned int high_addr_bits = (address >> 8) & spi->munge_address;

	return command | (high_addr_bits << 3);
}

507
/* Wait up to 10 ms for buffered write completion */
508 509
int
falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi)
510
{
511
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
512
	u8 status;
513
	int rc;
514

515
	for (;;) {
516
		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
517 518 519 520 521
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
522
		if (time_after_eq(jiffies, timeout)) {
523 524 525 526
			netif_err(efx, hw, efx->net_dev,
				  "SPI write timeout on device %d"
				  " last status=0x%02x\n",
				  spi->device_id, status);
527 528 529
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
530 531 532
	}
}

533 534
/* Read @len bytes starting at @start from an SPI device, in chunks of
 * at most FALCON_SPI_MAX_LEN.  On return *retlen (if non-NULL) holds
 * the number of bytes actually read.  Returns 0, a negative errno from
 * the SPI layer, or -EINTR if a signal interrupted the transfer. */
int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
		    loff_t start, size_t len, size_t *retlen, u8 *buffer)
{
	size_t block_len, pos = 0;
	unsigned int command;
	int rc = 0;

	while (pos < len) {
		block_len = min(len - pos, FALCON_SPI_MAX_LEN);

		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
		rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL,
				    buffer + pos, block_len);
		if (rc)
			break;
		pos += block_len;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
	}

	if (retlen)
		*retlen = pos;
	return rc;
}

563 564 565
/* Write @len bytes starting at @start to an SPI device, obeying the
 * device's write-block boundaries, and read each block back to verify
 * it.  On return *retlen (if non-NULL) holds the number of bytes
 * successfully written.  Returns 0, a negative errno, -EIO on verify
 * mismatch, or -EINTR if a signal interrupted the transfer. */
int
falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
		 loff_t start, size_t len, size_t *retlen, const u8 *buffer)
{
	u8 verify_buffer[FALCON_SPI_MAX_LEN];
	size_t block_len, pos = 0;
	unsigned int command;
	int rc = 0;

	while (pos < len) {
		/* Each write must be preceded by a write-enable */
		rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
		if (rc)
			break;

		block_len = min(len - pos,
				falcon_spi_write_limit(spi, start + pos));
		command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
		rc = falcon_spi_cmd(efx, spi, command, start + pos,
				    buffer + pos, NULL, block_len);
		if (rc)
			break;

		rc = falcon_spi_wait_write(efx, spi);
		if (rc)
			break;

		/* Read the block back and verify it */
		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
		rc = falcon_spi_cmd(efx, spi, command, start + pos,
				    NULL, verify_buffer, block_len);
		/* Bug fix: check the read's own result before comparing,
		 * otherwise a failed read-back would compare against a
		 * stale/uninitialised verify_buffer. */
		if (rc)
			break;
		if (memcmp(verify_buffer, buffer + pos, block_len)) {
			rc = -EIO;
			break;
		}

		pos += block_len;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
	}

	if (retlen)
		*retlen = pos;
	return rc;
}

/**************************************************************************
 *
 * XMAC operations
 *
 **************************************************************************
 */

/* Configure the XAUI driver that is an output from Falcon */
static void falcon_setup_xaui(struct efx_nic *efx)
{
	efx_oword_t sdctl, txdrv;

	/* Move the XAUI into low power, unless there is no PHY, in
	 * which case the XAUI will have to drive a cable. */
	if (efx->phy_type == PHY_TYPE_NONE)
		return;

	/* Set high and low drive strength on all four lanes (A-D) to the
	 * hardware default. */
	efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
	efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);

	/* Program per-lane TX driver settings (DEQ/DTX) to the hardware
	 * defaults; presumably de-emphasis and drive level — confirm
	 * against the SFC4000 register specification. */
	EFX_POPULATE_OWORD_8(txdrv,
			     FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
	efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
}

/* Reset the XAUI/XGXS block and reinitialise it.
 * Returns 0 on success or -ETIMEDOUT if the reset does not complete
 * within ~10 ms.  Caller must have stats DMA disabled. */
int falcon_reset_xaui(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;
	int count;

	/* Don't fetch MAC statistics over an XMAC reset */
	WARN_ON(nic_data->stats_disable_count == 0);

	/* Start reset sequence */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
	efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);

	/* Wait up to 10 ms for completion, then reinitialise */
	for (count = 0; count < 1000; count++) {
		efx_reado(efx, &reg, FR_AB_XX_PWR_RST);
		/* Complete when both the reset enable and the SerDes
		 * reset-active flags have cleared */
		if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
		    EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
			falcon_setup_xaui(efx);
			return 0;
		}
		udelay(10);
	}
	netif_err(efx, hw, efx->net_dev,
		  "timed out waiting for XAUI/XGXS reset\n");
	return -ETIMEDOUT;
}

/* Acknowledge the XMAC management status interrupt (B0 only). */
static void falcon_ack_status_intr(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;

	/* Only relevant on B0 silicon and never inside internal loopback */
	if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
		return;

	/* We expect xgmii faults if the wireside link is down */
	if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up)
		return;

	/* We can only use this interrupt to signal the negative edge of
	 * xaui_align [we have to poll the positive edge]. */
	if (nic_data->xmac_poll_required)
		return;

	/* The read itself is the acknowledge; the value is discarded.
	 * NOTE(review): presumably reading FR_AB_XM_MGT_INT_MSK clears
	 * the latched status — confirm against the register spec. */
	efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
}

/* Check MAC-side XGXS link state: true when lane alignment is done and
 * all four lanes report sync.  Note this read-modify-write also clears
 * the latched error counters, so calling it has a side effect. */
static bool falcon_xgxs_link_ok(struct efx_nic *efx)
{
	efx_oword_t reg;
	bool align_done, link_ok = false;
	int sync_status;

	/* Read link status */
	efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);

	align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
	sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
	if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
		link_ok = true;

	/* Clear link status ready for next read */
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
	efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);

	return link_ok;
}

static bool falcon_xmac_link_ok(struct efx_nic *efx)
{
	/*
	 * Check MAC's XGXS link status except when using XGMII loopback
	 * which bypasses the XGXS block.
	 * If possible, check PHY's XGXS link status except when using
	 * MAC loopback.
	 */
	return (efx->loopback_mode == LOOPBACK_XGMII ||
		falcon_xgxs_link_ok(efx)) &&
		(!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
		 LOOPBACK_INTERNAL(efx) ||
		 efx_mdio_phyxgxs_lane_sync(efx));
}

/* Program the XMAC core: global config, TX/RX enables, frame-length
 * limits, flow control and station MAC address, all derived from the
 * current efx link/flow-control state. */
static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
{
	unsigned int max_frame_len;
	efx_oword_t reg;
	bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX);
	bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX);

	/* Configure MAC  - cut-thru mode is hard wired on */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AB_XM_RX_JUMBO_MODE, 1,
			     FRF_AB_XM_TX_STAT_EN, 1,
			     FRF_AB_XM_RX_STAT_EN, 1);
	efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

	/* Configure TX */
	EFX_POPULATE_OWORD_6(reg,
			     FRF_AB_XM_TXEN, 1,
			     FRF_AB_XM_TX_PRMBL, 1,
			     FRF_AB_XM_AUTO_PAD, 1,
			     FRF_AB_XM_TXCRC, 1,
			     FRF_AB_XM_FCNTL, tx_fc,
			     FRF_AB_XM_IPG, 0x3);
	efx_writeo(efx, &reg, FR_AB_XM_TX_CFG);

	/* Configure RX */
	EFX_POPULATE_OWORD_5(reg,
			     FRF_AB_XM_RXEN, 1,
			     FRF_AB_XM_AUTO_DEPAD, 0,
			     FRF_AB_XM_ACPT_ALL_MCAST, 1,
			     FRF_AB_XM_ACPT_ALL_UCAST, efx->promiscuous,
			     FRF_AB_XM_PASS_CRC_ERR, 1);
	efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);

	/* Set frame length */
	max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
	EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
	efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
			     FRF_AB_XM_TX_JUMBO_MODE, 1);
	efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM);

	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
			     FRF_AB_XM_DIS_FCNTL, !rx_fc);
	efx_writeo(efx, &reg, FR_AB_XM_FC);

	/* Set MAC address; the 6-byte address is split across the low
	 * and high address registers */
	memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
	efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
	memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
	efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);
}

/* Program the XGXS block's loopback configuration to match the current
 * efx loopback mode, resetting the block first where required by
 * workaround 5147. */
static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
{
	efx_oword_t reg;
	bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
	bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
	bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);

	/* XGXS block is flaky and will need to be reset if moving
	 * into our out of XGMII, XGXS or XAUI loopbacks. */
	if (EFX_WORKAROUND_5147(efx)) {
		bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
		bool reset_xgxs;

		efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
		old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
		old_xgmii_loopback =
			EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);

		efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
		old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);

		/* The PHY driver may have turned XAUI off */
		reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
			      (xaui_loopback != old_xaui_loopback) ||
			      (xgmii_loopback != old_xgmii_loopback));

		if (reset_xgxs)
			falcon_reset_xaui(efx);
	}

	/* Force signal detect in XGXS/XAUI loopback (no real signal) */
	efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
			    (xgxs_loopback || xaui_loopback) ?
			    FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
	efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);

	/* XAUI loopback is enabled per-lane (A-D) in the SerDes control */
	efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
	efx_writeo(efx, &reg, FR_AB_XX_SD_CTL);
}


/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
{
	bool up = falcon_xmac_link_ok(efx);

	if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
	    efx_phy_mode_disabled(efx->phy_mode))
		/* XAUI link is expected to be down */
		return up;

	/* MAC stats must not be fetched while we bash the XAUI block */
	falcon_stop_nic_stats(efx);

	for (; !up && tries; --tries) {
		netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
		falcon_reset_xaui(efx);
		udelay(200);
		up = falcon_xmac_link_ok(efx);
	}

	falcon_start_nic_stats(efx);

	return up;
}

/* Report an XMAC fault unless the link comes up within five retries. */
static bool falcon_xmac_check_fault(struct efx_nic *efx)
{
	bool link_up = falcon_xmac_link_ok_retry(efx, 5);

	return !link_up;
}

/* Fully reconfigure the XMAC path (XGXS core, XMAC core, MAC wrapper),
 * then try to bring the link up and arm polling if it stays down.
 * Always returns 0. */
static int falcon_reconfigure_xmac(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	falcon_reconfigure_xgxs_core(efx);
	falcon_reconfigure_xmac_core(efx);

	falcon_reconfigure_mac_wrapper(efx);

	/* If the link did not come up, fall back to polling it */
	nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
	falcon_ack_status_intr(efx);

	return 0;
}

/* Accumulate the DMAed XMAC statistics block (laid out per the
 * Xg*_offset/_WIDTH definitions above) into efx->mac_stats, then
 * refresh the derived good/bad byte counts. */
static void falcon_update_stats_xmac(struct efx_nic *efx)
{
	struct efx_mac_stats *mac_stats = &efx->mac_stats;

	/* Update MAC stats from DMAed values */
	FALCON_STAT(efx, XgRxOctets, rx_bytes);
	FALCON_STAT(efx, XgRxOctetsOK, rx_good_bytes);
	FALCON_STAT(efx, XgRxPkts, rx_packets);
	FALCON_STAT(efx, XgRxPktsOK, rx_good);
	FALCON_STAT(efx, XgRxBroadcastPkts, rx_broadcast);
	FALCON_STAT(efx, XgRxMulticastPkts, rx_multicast);
	FALCON_STAT(efx, XgRxUnicastPkts, rx_unicast);
	FALCON_STAT(efx, XgRxUndersizePkts, rx_lt64);
	FALCON_STAT(efx, XgRxOversizePkts, rx_gtjumbo);
	FALCON_STAT(efx, XgRxJabberPkts, rx_bad_gtjumbo);
	FALCON_STAT(efx, XgRxUndersizeFCSerrorPkts, rx_bad_lt64);
	FALCON_STAT(efx, XgRxDropEvents, rx_overflow);
	FALCON_STAT(efx, XgRxFCSerrorPkts, rx_bad);
	FALCON_STAT(efx, XgRxAlignError, rx_align_error);
	FALCON_STAT(efx, XgRxSymbolError, rx_symbol_error);
	FALCON_STAT(efx, XgRxInternalMACError, rx_internal_error);
	FALCON_STAT(efx, XgRxControlPkts, rx_control);
	FALCON_STAT(efx, XgRxPausePkts, rx_pause);
	FALCON_STAT(efx, XgRxPkts64Octets, rx_64);
	FALCON_STAT(efx, XgRxPkts65to127Octets, rx_65_to_127);
	FALCON_STAT(efx, XgRxPkts128to255Octets, rx_128_to_255);
	FALCON_STAT(efx, XgRxPkts256to511Octets, rx_256_to_511);
	FALCON_STAT(efx, XgRxPkts512to1023Octets, rx_512_to_1023);
	FALCON_STAT(efx, XgRxPkts1024to15xxOctets, rx_1024_to_15xx);
	FALCON_STAT(efx, XgRxPkts15xxtoMaxOctets, rx_15xx_to_jumbo);
	FALCON_STAT(efx, XgRxLengthError, rx_length_error);
	FALCON_STAT(efx, XgTxPkts, tx_packets);
	FALCON_STAT(efx, XgTxOctets, tx_bytes);
	FALCON_STAT(efx, XgTxMulticastPkts, tx_multicast);
	FALCON_STAT(efx, XgTxBroadcastPkts, tx_broadcast);
	FALCON_STAT(efx, XgTxUnicastPkts, tx_unicast);
	FALCON_STAT(efx, XgTxControlPkts, tx_control);
	FALCON_STAT(efx, XgTxPausePkts, tx_pause);
	FALCON_STAT(efx, XgTxPkts64Octets, tx_64);
	FALCON_STAT(efx, XgTxPkts65to127Octets, tx_65_to_127);
	FALCON_STAT(efx, XgTxPkts128to255Octets, tx_128_to_255);
	FALCON_STAT(efx, XgTxPkts256to511Octets, tx_256_to_511);
	FALCON_STAT(efx, XgTxPkts512to1023Octets, tx_512_to_1023);
	FALCON_STAT(efx, XgTxPkts1024to15xxOctets, tx_1024_to_15xx);
	FALCON_STAT(efx, XgTxPkts1519toMaxOctets, tx_15xx_to_jumbo);
	FALCON_STAT(efx, XgTxUndersizePkts, tx_lt64);
	FALCON_STAT(efx, XgTxOversizePkts, tx_gtjumbo);
	FALCON_STAT(efx, XgTxNonTcpUdpPkt, tx_non_tcpudp);
	FALCON_STAT(efx, XgTxMacSrcErrPkt, tx_mac_src_error);
	FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error);

	/* Update derived statistics (control frames are 64 bytes each) */
	efx_update_diff_stat(&mac_stats->tx_good_bytes,
			     mac_stats->tx_bytes - mac_stats->tx_bad_bytes -
			     mac_stats->tx_control * 64);
	efx_update_diff_stat(&mac_stats->rx_bad_bytes,
			     mac_stats->rx_bytes - mac_stats->rx_good_bytes -
			     mac_stats->rx_control * 64);
}

static void falcon_poll_xmac(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up ||
	    !nic_data->xmac_poll_required)
		return;

	nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
	falcon_ack_status_intr(efx);
}

/**************************************************************************
 *
 * MAC wrapper
 *
 **************************************************************************
 */
/* Write the software multicast hash filter into the two MAC hash
 * registers.  Caller must hold efx->mac_lock.
 */
static void falcon_push_multicast_hash(struct efx_nic *efx)
{
	union efx_multicast_hash *hash = &efx->multicast_hash;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	/* The hash is split across two hardware registers */
	efx_writeo(efx, &hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
	efx_writeo(efx, &hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
}

/* Reset the MAC blocks.  On pre-B0 silicon only the internal XMAC core
 * reset is safe; on B0 the TX FIFO is put into drain mode and the
 * XGTX/XGRX/EM blocks are reset through GLB_CTL.  The caller must have
 * disabled MAC stats (see WARN_ON below) before calling this. */
static void falcon_reset_macs(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg, mac_ctrl;
	int count;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* It's not safe to use GLB_CTL_REG to reset the
		 * macs, so instead use the internal MAC resets
		 */
		EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
		efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

		/* Poll for up to 100ms for the self-clearing reset bit */
		for (count = 0; count < 10000; count++) {
			efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
			if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
			    0)
				return;
			udelay(10);
		}

		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for XMAC core reset\n");
	}

	/* Mac stats will fail whilst the TX fifo is draining */
	WARN_ON(nic_data->stats_disable_count == 0);

	/* Save MAC_CTRL, then enable TX FIFO drain before resetting */
	efx_reado(efx, &mac_ctrl, FR_AB_MAC_CTRL);
	EFX_SET_OWORD_FIELD(mac_ctrl, FRF_BB_TXFIFO_DRAIN_EN, 1);
	efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);

	/* Assert reset on the XGTX, XGRX and EM blocks together */
	efx_reado(efx, &reg, FR_AB_GLB_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
	efx_writeo(efx, &reg, FR_AB_GLB_CTL);

	/* Wait for all three self-clearing reset bits */
	count = 0;
	while (1) {
		efx_reado(efx, &reg, FR_AB_GLB_CTL);
		if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
			netif_dbg(efx, hw, efx->net_dev,
				  "Completed MAC reset after %d loops\n",
				  count);
			break;
		}
		if (count > 20) {
			netif_err(efx, hw, efx->net_dev, "MAC reset failed\n");
			break;
		}
		count++;
		udelay(10);
	}

	/* Ensure the correct MAC is selected before statistics
	 * are re-enabled by the caller */
	efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);

	falcon_setup_xaui(efx);
}

1038
static void falcon_drain_tx_fifo(struct efx_nic *efx)
1039 1040 1041
{
	efx_oword_t reg;

1042
	if ((efx_nic_rev(efx) < EFX_REV_FALCON_B0) ||
1043 1044 1045
	    (efx->loopback_mode != LOOPBACK_NONE))
		return;

1046
	efx_reado(efx, &reg, FR_AB_MAC_CTRL);
1047
	/* There is no point in draining more than once */
1048
	if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
1049 1050 1051
		return;

	falcon_reset_macs(efx);
1052 1053
}

B
Ben Hutchings 已提交
1054
static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
1055
{
1056
	efx_oword_t reg;
1057

1058
	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
1059 1060 1061
		return;

	/* Isolate the MAC -> RX */
1062
	efx_reado(efx, &reg, FR_AZ_RX_CFG);
1063
	EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
1064
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
1065

B
Ben Hutchings 已提交
1066 1067
	/* Isolate TX -> MAC */
	falcon_drain_tx_fifo(efx);
1068 1069
}

/* Program the MAC wrapper (MAC_CTRL and RX_CFG) for the current link
 * state: speed, pause parameters, promiscuity and multicast hash.
 * While a reset is pending the MAC is kept isolated from the datapath. */
static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;
	efx_oword_t reg;
	int link_speed, isolate;

	/* Keep the MAC isolated if any reset is scheduled */
	isolate = !!ACCESS_ONCE(efx->reset_pending);

	/* Encode link speed for FRF_AB_MAC_SPEED */
	switch (link_state->speed) {
	case 10000: link_speed = 3; break;
	case 1000:  link_speed = 2; break;
	case 100:   link_speed = 1; break;
	default:    link_speed = 0; break;
	}
	/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
	 * as advertised.  Disable to ensure packets are not
	 * indefinitely held and TX queue can be flushed at any point
	 * while the link is down. */
	EFX_POPULATE_OWORD_5(reg,
			     FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
			     FRF_AB_MAC_BCAD_ACPT, 1,
			     FRF_AB_MAC_UC_PROM, efx->promiscuous,
			     FRF_AB_MAC_LINK_STATUS, 1, /* always set */
			     FRF_AB_MAC_SPEED, link_speed);
	/* On B0, MAC backpressure can be disabled and packets get
	 * discarded. */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
				    !link_state->up || isolate);
	}

	efx_writeo(efx, &reg, FR_AB_MAC_CTRL);

	/* Restore the multicast hash registers. */
	falcon_push_multicast_hash(efx);

	efx_reado(efx, &reg, FR_AZ_RX_CFG);
	/* Enable XOFF signal from RX FIFO (we enabled it during NIC
	 * initialisation but it may read back as 0) */
	EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
	/* Unisolate the MAC -> RX */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate);
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
}

/* Kick off a DMA of the MAC statistics into efx->stats_buffer and arm
 * the completion timer.  Must not be called with a request already
 * pending or with stats collection disabled (see WARN_ONs). */
static void falcon_stats_request(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;

	WARN_ON(nic_data->stats_pending);
	WARN_ON(nic_data->stats_disable_count);

	if (nic_data->stats_dma_done == NULL)
		return;	/* no mac selected */

	/* Clear the DMA-done marker before starting the transfer */
	*nic_data->stats_dma_done = FALCON_STATS_NOT_DONE;
	nic_data->stats_pending = true;
	wmb(); /* ensure done flag is clear */

	/* Initiate DMA transfer of stats */
	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_MAC_STAT_DMA_CMD, 1,
			     FRF_AB_MAC_STAT_DMA_ADR,
			     efx->stats_buffer.dma_addr);
	efx_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);

	/* Poll for completion in at most half a second */
	mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
}

static void falcon_stats_complete(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (!nic_data->stats_pending)
		return;

1148
	nic_data->stats_pending = false;
1149 1150
	if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
		rmb(); /* read the done flag before the stats */
1151
		falcon_update_stats_xmac(efx);
1152
	} else {
1153 1154
		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for statistics\n");
1155
	}
1156
}
1157

1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169
static void falcon_stats_timer_func(unsigned long context)
{
	struct efx_nic *efx = (struct efx_nic *)context;
	struct falcon_nic_data *nic_data = efx->nic_data;

	spin_lock(&efx->stats_lock);

	falcon_stats_complete(efx);
	if (nic_data->stats_disable_count == 0)
		falcon_stats_request(efx);

	spin_unlock(&efx->stats_lock);
1170 1171
}

S
Steve Hodgson 已提交
1172 1173 1174 1175 1176 1177 1178 1179 1180 1181
static bool falcon_loopback_link_poll(struct efx_nic *efx)
{
	struct efx_link_state old_state = efx->link_state;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	efx->link_state.fd = true;
	efx->link_state.fc = efx->wanted_fc;
	efx->link_state.up = true;
1182
	efx->link_state.speed = 10000;
S
Steve Hodgson 已提交
1183 1184 1185 1186

	return !efx_link_state_equal(&efx->link_state, &old_state);
}

/* Full port reconfiguration: poll the PHY, quiesce stats, isolate and
 * reset the MACs, then reprogram PHY and XMAC and restart stats.
 * Always returns 0 (XMAC reconfigure failure is fatal via BUG_ON). */
static int falcon_reconfigure_port(struct efx_nic *efx)
{
	int rc;

	WARN_ON(efx_nic_rev(efx) > EFX_REV_FALCON_B0);

	/* Poll the PHY link state *before* reconfiguring it. This means we
	 * will pick up the correct speed (in loopback) to select the correct
	 * MAC.
	 */
	if (LOOPBACK_INTERNAL(efx))
		falcon_loopback_link_poll(efx);
	else
		efx->phy_op->poll(efx);

	/* Stats must be stopped before the MAC reset below */
	falcon_stop_nic_stats(efx);
	falcon_deconfigure_mac_wrapper(efx);

	falcon_reset_macs(efx);

	efx->phy_op->reconfigure(efx);
	rc = falcon_reconfigure_xmac(efx);
	BUG_ON(rc);

	falcon_start_nic_stats(efx);

	/* Synchronise efx->link_state with the kernel */
	efx_link_status_changed(efx);

	return 0;
}

/* TX flow control may automatically turn itself off if the link
 * partner (intermittently) stops responding to pause frames. There
 * isn't any indication that this has happened, so the best we do is
 * leave it up to the user to spot this and fix it by cycling transmit
 * flow control on this end.
 */

/* On A1 the only recovery path is a full invisible chip reset. */
static void falcon_a1_prepare_enable_fc_tx(struct efx_nic *efx)
{
	/* Schedule a reset to recover */
	efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
}

/* On B0 TX flow control can be recovered without a chip reset:
 * stop stats, drain the TX FIFO (which resets the EM block), then
 * reprogram the XMAC and restart stats. */
static void falcon_b0_prepare_enable_fc_tx(struct efx_nic *efx)
{
	/* Recover by resetting the EM block */
	falcon_stop_nic_stats(efx);
	falcon_drain_tx_fifo(efx);
	falcon_reconfigure_xmac(efx);
	falcon_start_nic_stats(efx);
}

/**************************************************************************
 *
 * PHY access via GMII
 *
 **************************************************************************
 */

/* Wait for GMII access to complete.
 * Returns 0 when the MDIO interface is idle, -EIO on a reported bus
 * error, or -ETIMEDOUT if it stays busy. */
static int falcon_gmii_wait(struct efx_nic *efx)
{
	efx_oword_t md_stat;
	int count;

	/* wait up to 50ms - taken max from datasheet */
	for (count = 0; count < 5000; count++) {
		efx_reado(efx, &md_stat, FR_AB_MD_STAT);
		if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
			/* Interface idle; check for line fault / bus error */
			if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
			    EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
				netif_err(efx, hw, efx->net_dev,
					  "error from GMII access "
					  EFX_OWORD_FMT"\n",
					  EFX_OWORD_VAL(md_stat));
				return -EIO;
			}
			return 0;
		}
		udelay(10);
	}
	netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n");
	return -ETIMEDOUT;
}

/* Write an MDIO register of a PHY connected to Falcon.
 * Serialised by nic_data->mdio_lock; returns 0 or a negative error
 * from falcon_gmii_wait(). */
static int falcon_mdio_write(struct net_device *net_dev,
			     int prtad, int devad, u16 addr, u16 value)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;
	int rc;

	netif_vdbg(efx, hw, efx->net_dev,
		   "writing MDIO %d register %d.%d with 0x%04x\n",
		    prtad, devad, addr, value);

	mutex_lock(&nic_data->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	/* Write the address/ID register */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	/* Select the port and MMD (device) address */
	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	efx_writeo(efx, &reg, FR_AB_MD_ID);

	/* Write data */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
	efx_writeo(efx, &reg, FR_AB_MD_TXD);

	/* Start the write cycle */
	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_MD_WRC, 1,
			     FRF_AB_MD_GC, 0);
	efx_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to be written */
	rc = falcon_gmii_wait(efx);
	if (rc) {
		/* Abort the write operation */
		EFX_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_WRC, 0,
				     FRF_AB_MD_GC, 1);
		efx_writeo(efx, &reg, FR_AB_MD_CS);
		udelay(10);
	}

out:
	mutex_unlock(&nic_data->mdio_lock);
	return rc;
}

/* Read an MDIO register of a PHY connected to Falcon.
 * Serialised by nic_data->mdio_lock; returns the (non-negative)
 * register value on success or a negative error code. */
static int falcon_mdio_read(struct net_device *net_dev,
			    int prtad, int devad, u16 addr)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;
	int rc;

	mutex_lock(&nic_data->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	/* Set the register address, then port/MMD address */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	efx_writeo(efx, &reg, FR_AB_MD_ID);

	/* Request data to be read */
	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
	efx_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to become available */
	rc = falcon_gmii_wait(efx);
	if (rc == 0) {
		efx_reado(efx, &reg, FR_AB_MD_RXD);
		rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
		netif_vdbg(efx, hw, efx->net_dev,
			   "read from MDIO %d register %d.%d, got %04x\n",
			   prtad, devad, addr, rc);
	} else {
		/* Abort the read operation */
		EFX_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_RIC, 0,
				     FRF_AB_MD_GC, 1);
		efx_writeo(efx, &reg, FR_AB_MD_CS);

		netif_dbg(efx, hw, efx->net_dev,
			  "read from MDIO %d register %d.%d, got error %d\n",
			  prtad, devad, addr, rc);
	}

out:
	mutex_unlock(&nic_data->mdio_lock);
	return rc;
}

/* This call is responsible for hooking in the MAC and PHY operations */
/* Selects the PHY ops table for the fitted PHY, wires up MDIO
 * accessors, sets initial link/flow-control assumptions and allocates
 * the DMA buffer used for MAC statistics.
 * Returns 0 or a negative error code. */
static int falcon_probe_port(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	switch (efx->phy_type) {
	case PHY_TYPE_SFX7101:
		efx->phy_op = &falcon_sfx7101_phy_ops;
		break;
	case PHY_TYPE_QT2022C2:
	case PHY_TYPE_QT2025C:
		efx->phy_op = &falcon_qt202x_phy_ops;
		break;
	case PHY_TYPE_TXC43128:
		efx->phy_op = &falcon_txc_phy_ops;
		break;
	default:
		netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n",
			  efx->phy_type);
		return -ENODEV;
	}

	/* Fill out MDIO structure and loopback modes */
	mutex_init(&nic_data->mdio_lock);
	efx->mdio.mdio_read = falcon_mdio_read;
	efx->mdio.mdio_write = falcon_mdio_write;
	rc = efx->phy_op->probe(efx);
	if (rc != 0)
		return rc;

	/* Initial assumption */
	efx->link_state.speed = 10000;
	efx->link_state.fd = true;

	/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
	else
		efx->wanted_fc = EFX_FC_RX;
	if (efx->mdio.mmds & MDIO_DEVS_AN)
		efx->wanted_fc |= EFX_FC_AUTO;

	/* Allocate buffer for stats */
	rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
				  FALCON_MAC_STATS_SIZE, GFP_KERNEL);
	if (rc)
		return rc;
	netif_dbg(efx, probe, efx->net_dev,
		  "stats buffer at %llx (virt %p phys %llx)\n",
		  (u64)efx->stats_buffer.dma_addr,
		  efx->stats_buffer.addr,
		  (u64)virt_to_phys(efx->stats_buffer.addr));
	/* The hardware writes a done marker at this offset after stats DMA */
	nic_data->stats_dma_done = efx->stats_buffer.addr + XgDmaDone_offset;

	return 0;
}

/* Undo falcon_probe_port(): remove the PHY and free the stats DMA
 * buffer. */
static void falcon_remove_port(struct efx_nic *efx)
{
	efx->phy_op->remove(efx);
	efx_nic_free_buffer(efx, &efx->stats_buffer);
}

/* Global events are basically PHY events */
/* Handle a global event on @channel.  Returns true if the event was
 * recognised (including events deliberately ignored), false otherwise. */
static bool
falcon_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR))
		/* Ignored */
		return true;

	/* XG management interrupt (B0 only): defer XMAC handling to poll */
	if ((efx_nic_rev(efx) == EFX_REV_FALCON_B0) &&
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
		nic_data->xmac_poll_required = true;
		return true;
	}

	/* RX recovery: event field differs between A1 and B0 silicon */
	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
	    EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen global RX_RESET event. Resetting.\n",
			  channel->channel);

		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
		return true;
	}

	return false;
}

/**************************************************************************
 *
 * Falcon test code
 *
 **************************************************************************/

/* Read and validate the NVRAM configuration region from SPI flash or
 * EEPROM.  Checks the magic number, structure version and checksum;
 * on success copies the nvconfig into *nvconfig_out if non-NULL.
 * Returns 0, -EINVAL (no SPI device / bad contents), -ENOMEM or -EIO. */
static int
falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_nvconfig *nvconfig;
	struct efx_spi_device *spi;
	void *region;
	int rc, magic_num, struct_ver;
	__le16 *word, *limit;
	u32 csum;

	/* Prefer flash over EEPROM when both are fitted */
	if (efx_spi_present(&nic_data->spi_flash))
		spi = &nic_data->spi_flash;
	else if (efx_spi_present(&nic_data->spi_eeprom))
		spi = &nic_data->spi_eeprom;
	else
		return -EINVAL;

	region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
	if (!region)
		return -ENOMEM;
	nvconfig = region + FALCON_NVCONFIG_OFFSET;

	mutex_lock(&nic_data->spi_lock);
	rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
	mutex_unlock(&nic_data->spi_lock);
	if (rc) {
		netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
			  efx_spi_present(&nic_data->spi_flash) ?
			  "flash" : "EEPROM");
		rc = -EIO;
		goto out;
	}

	magic_num = le16_to_cpu(nvconfig->board_magic_num);
	struct_ver = le16_to_cpu(nvconfig->board_struct_ver);

	rc = -EINVAL;
	if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM bad magic 0x%x\n", magic_num);
		goto out;
	}
	if (struct_ver < 2) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM has ancient version 0x%x\n", struct_ver);
		goto out;
	} else if (struct_ver < 4) {
		/* v2/v3: checksum covers the nvconfig structure only */
		word = &nvconfig->board_magic_num;
		limit = (__le16 *) (nvconfig + 1);
	} else {
		/* v4+: checksum covers the whole region */
		word = region;
		limit = region + FALCON_NVCONFIG_END;
	}
	for (csum = 0; word < limit; ++word)
		csum += le16_to_cpu(*word);

	/* A valid region sums to 0xffff in ones'-complement terms */
	if (~csum & 0xffff) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM has incorrect checksum\n");
		goto out;
	}

	rc = 0;
	if (nvconfig_out)
		memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));

 out:
	kfree(region);
	return rc;
}

/* NVRAM self-test: validate the config region without copying it out. */
static int falcon_test_nvram(struct efx_nic *efx)
{
	return falcon_read_nvram(efx, NULL);
}

/* Registers exercised by the B0 register self-test.  Each entry pairs
 * a register address with a mask of the bits that may safely be
 * written and read back during the test. */
static const struct efx_farch_register_test falcon_b0_register_tests[] = {
	{ FR_AZ_ADR_REGION,
	  EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
	{ FR_AZ_RX_CFG,
	  EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
	{ FR_AZ_TX_CFG,
	  EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_TX_RESERVED,
	  EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
	{ FR_AB_MAC_CTRL,
	  EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_SRM_TX_DC_CFG,
	  EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_DC_CFG,
	  EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_DC_PF_WM,
	  EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_BZ_DP_CTRL,
	  EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_GM_CFG2,
	  EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_GMF_CFG0,
	  EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_GLB_CFG,
	  EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_TX_CFG,
	  EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_RX_CFG,
	  EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_RX_PARAM,
	  EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_FC,
	  EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_ADR_LO,
	  EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XX_SD_CTL,
	  EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
};

/* B0 chip self-test: move into a loopback mode so the XMAC clock runs,
 * take the port down, run the register tests, then reset and bring the
 * port back up.  Records the result in tests->registers and returns 0
 * or the first reset error. */
static int
falcon_b0_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
{
	enum reset_type reset_method = RESET_TYPE_INVISIBLE;
	int rc, rc2;

	mutex_lock(&efx->mac_lock);
	if (efx->loopback_modes) {
		/* We need the 312 clock from the PHY to test the XMAC
		 * registers, so move into XGMII loopback if available */
		if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
			efx->loopback_mode = LOOPBACK_XGMII;
		else
			efx->loopback_mode = __ffs(efx->loopback_modes);
	}
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	efx_reset_down(efx, reset_method);

	/* 1 = pass, -1 = fail (ethtool self-test convention) */
	tests->registers =
		efx_farch_test_registers(efx, falcon_b0_register_tests,
					 ARRAY_SIZE(falcon_b0_register_tests))
		? -1 : 1;

	rc = falcon_reset_hw(efx, reset_method);
	rc2 = efx_reset_up(efx, reset_method, rc == 0);
	return rc ? rc : rc2;
}

1630 1631 1632 1633 1634 1635 1636
/**************************************************************************
 *
 * Device reset
 *
 **************************************************************************
 */

1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679
static enum reset_type falcon_map_reset_reason(enum reset_type reason)
{
	switch (reason) {
	case RESET_TYPE_RX_RECOVERY:
	case RESET_TYPE_RX_DESC_FETCH:
	case RESET_TYPE_TX_DESC_FETCH:
	case RESET_TYPE_TX_SKIP:
		/* These can occasionally occur due to hardware bugs.
		 * We try to reset without disrupting the link.
		 */
		return RESET_TYPE_INVISIBLE;
	default:
		return RESET_TYPE_ALL;
	}
}

/* Translate ethtool ETH_RESET_* flag bits into a Falcon reset type,
 * clearing the flag bits that the chosen reset covers.  Supersets are
 * checked first so the most comprehensive applicable reset wins.
 * Returns a RESET_TYPE_* value or -EINVAL if no supported combination
 * of flags is fully present. */
static int falcon_map_reset_flags(u32 *flags)
{
	enum {
		FALCON_RESET_INVISIBLE = (ETH_RESET_DMA | ETH_RESET_FILTER |
					  ETH_RESET_OFFLOAD | ETH_RESET_MAC),
		FALCON_RESET_ALL = FALCON_RESET_INVISIBLE | ETH_RESET_PHY,
		FALCON_RESET_WORLD = FALCON_RESET_ALL | ETH_RESET_IRQ,
	};

	if ((*flags & FALCON_RESET_WORLD) == FALCON_RESET_WORLD) {
		*flags &= ~FALCON_RESET_WORLD;
		return RESET_TYPE_WORLD;
	}
	if ((*flags & FALCON_RESET_ALL) == FALCON_RESET_ALL) {
		*flags &= ~FALCON_RESET_ALL;
		return RESET_TYPE_ALL;
	}
	if ((*flags & FALCON_RESET_INVISIBLE) == FALCON_RESET_INVISIBLE) {
		*flags &= ~FALCON_RESET_INVISIBLE;
		return RESET_TYPE_INVISIBLE;
	}

	return -EINVAL;
}

/* Resets NIC to known state.  This routine must be called in process
 * context and is allowed to sleep. */
/* For RESET_TYPE_WORLD the PCI config space of both functions is saved
 * and restored around the software reset; other reset types exclude
 * the PCIe core, EEPROM/flash and (for INVISIBLE) the PHY.
 * Returns 0 or a negative error code. */
static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t glb_ctl_reg_ker;
	int rc;

	netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n",
		  RESET_TYPE(method));

	/* Initiate device reset */
	if (method == RESET_TYPE_WORLD) {
		rc = pci_save_state(efx->pci_dev);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to backup PCI state of primary "
				  "function prior to hardware reset\n");
			goto fail1;
		}
		if (efx_nic_is_dual_func(efx)) {
			rc = pci_save_state(nic_data->pci_dev2);
			if (rc) {
				netif_err(efx, drv, efx->net_dev,
					  "failed to backup PCI state of "
					  "secondary function prior to "
					  "hardware reset\n");
				goto fail2;
			}
		}

		EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	} else {
		EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
				     /* exclude PHY from "invisible" reset */
				     FRF_AB_EXT_PHY_RST_CTL,
				     method == RESET_TYPE_INVISIBLE,
				     /* exclude EEPROM/flash and PCIe */
				     FRF_AB_PCIE_CORE_RST_CTL, 1,
				     FRF_AB_PCIE_NSTKY_RST_CTL, 1,
				     FRF_AB_PCIE_SD_RST_CTL, 1,
				     FRF_AB_EE_RST_CTL, 1,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	}
	efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);

	netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n");
	schedule_timeout_uninterruptible(HZ / 20);

	/* Restore PCI configuration if needed */
	if (method == RESET_TYPE_WORLD) {
		if (efx_nic_is_dual_func(efx))
			pci_restore_state(nic_data->pci_dev2);
		pci_restore_state(efx->pci_dev);
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully restored PCI config\n");
	}

	/* Assert that reset complete */
	efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
	if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
		rc = -ETIMEDOUT;
		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for hardware reset\n");
		goto fail3;
	}
	netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");

	return 0;

	/* pci_save_state() and pci_restore_state() MUST be called in pairs */
fail2:
	pci_restore_state(efx->pci_dev);
fail1:
fail3:
	return rc;
}

/* Public reset entry point: serialises the hardware reset against SPI
 * accesses via nic_data->spi_lock. */
static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	mutex_lock(&nic_data->spi_lock);
	rc = __falcon_reset_hw(efx, method);
	mutex_unlock(&nic_data->spi_lock);

	return rc;
}

/* Periodic hardware monitor: check board sensors (powering down the
 * PHY on a fault), poll the link, and reconfigure the MAC if the link
 * state changed.  Caller must hold efx->mac_lock. */
static void falcon_monitor(struct efx_nic *efx)
{
	bool link_changed;
	int rc;

	BUG_ON(!mutex_is_locked(&efx->mac_lock));

	rc = falcon_board(efx)->type->monitor(efx);
	if (rc) {
		netif_err(efx, hw, efx->net_dev,
			  "Board sensor %s; shutting down PHY\n",
			  (rc == -ERANGE) ? "reported fault" : "failed");
		efx->phy_mode |= PHY_MODE_LOW_POWER;
		rc = __efx_reconfigure_port(efx);
		WARN_ON(rc);
	}

	if (LOOPBACK_INTERNAL(efx))
		link_changed = falcon_loopback_link_poll(efx);
	else
		link_changed = efx->phy_op->poll(efx);

	if (link_changed) {
		/* Stats must be stopped around the MAC reset */
		falcon_stop_nic_stats(efx);
		falcon_deconfigure_mac_wrapper(efx);

		falcon_reset_macs(efx);
		rc = falcon_reconfigure_xmac(efx);
		BUG_ON(rc);

		falcon_start_nic_stats(efx);

		efx_link_status_changed(efx);
	}

	falcon_poll_xmac(efx);
}

/* Zeroes out the SRAM contents.  This routine must be called in
 * process context and is allowed to sleep.
 * Returns 0 on completion or -ETIMEDOUT.
 */
static int falcon_reset_sram(struct efx_nic *efx)
{
	efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
	int count;

	/* Set the SRAM wake/sleep GPIO appropriately. */
	efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
	efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);

	/* Initiate SRAM reset */
	EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
			     FRF_AZ_SRM_INIT_EN, 1,
			     FRF_AZ_SRM_NB_SZ, 0);
	efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);

	/* Wait for SRAM reset to complete */
	count = 0;
	do {
		netif_dbg(efx, hw, efx->net_dev,
			  "waiting for SRAM reset (attempt %d)...\n", count);

		/* SRAM reset is slow; expect around 16ms */
		schedule_timeout_uninterruptible(HZ / 50);

		/* Check for reset complete */
		efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
		if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
			netif_dbg(efx, hw, efx->net_dev,
				  "SRAM reset complete\n");

			return 0;
		}
	} while (++count < 20);	/* wait up to 0.4 sec */

	netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n");
	return -ETIMEDOUT;
}

/* Decode a packed SPI device-type word from NVRAM into *spi_device.
 * A device_type of 0 means the device is absent, recorded as size 0
 * (see efx_spi_present()). */
static void falcon_spi_device_init(struct efx_nic *efx,
				  struct efx_spi_device *spi_device,
				  unsigned int device_id, u32 device_type)
{
	if (device_type != 0) {
		spi_device->device_id = device_id;
		spi_device->size =
			1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
		spi_device->addr_len =
			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
		/* Small (512-byte) devices with 1-byte addressing fold the
		 * 9th address bit into the command byte */
		spi_device->munge_address = (spi_device->size == 1 << 9 &&
					     spi_device->addr_len == 1);
		spi_device->erase_command =
			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
		spi_device->erase_size =
			1 << SPI_DEV_TYPE_FIELD(device_type,
						SPI_DEV_TYPE_ERASE_SIZE);
		spi_device->block_size =
			1 << SPI_DEV_TYPE_FIELD(device_type,
						SPI_DEV_TYPE_BLOCK_SIZE);
	} else {
		spi_device->size = 0;
	}
}

/* Extract non-volatile configuration */
/* Reads the NVRAM config and applies it: PHY type/address, SPI device
 * descriptions (struct version >= 3), permanent MAC address and board
 * revision.  Returns 0 or a negative error code. */
static int falcon_probe_nvconfig(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_nvconfig *nvconfig;
	int rc;

	nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
	if (!nvconfig)
		return -ENOMEM;

	rc = falcon_read_nvram(efx, nvconfig);
	if (rc)
		goto out;

	efx->phy_type = nvconfig->board_v2.port0_phy_type;
	efx->mdio.prtad = nvconfig->board_v2.port0_phy_addr;

	if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
		falcon_spi_device_init(
			efx, &nic_data->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
			le32_to_cpu(nvconfig->board_v3
				    .spi_device_type[FFE_AB_SPI_DEVICE_FLASH]));
		falcon_spi_device_init(
			efx, &nic_data->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
			le32_to_cpu(nvconfig->board_v3
				    .spi_device_type[FFE_AB_SPI_DEVICE_EEPROM]));
	}

	/* Read the MAC addresses */
	memcpy(efx->net_dev->perm_addr, nvconfig->mac_address[0], ETH_ALEN);

	netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
		  efx->phy_type, efx->mdio.prtad);

	rc = falcon_probe_board(efx,
				le16_to_cpu(nvconfig->board_v2.board_revision));
out:
	kfree(nvconfig);
	return rc;
}

/* Fix the SRAM offsets of the RX and TX descriptor caches.
 * NOTE(review): constants are the Falcon buffer-table layout —
 * confirm against the SFC4000 SRAM map before changing. */
static void falcon_dimension_resources(struct efx_nic *efx)
{
	efx->rx_dc_base = 0x20000;
	efx->tx_dc_base = 0x26000;
}

/* Probe all SPI devices on the NIC.
 *
 * Determines which SPI device (flash or EEPROM) the NIC booted from
 * and registers only that one; falcon_probe_nvconfig() may fill in the
 * other later.  If the NIC booted from internal ASIC settings, the
 * VPD engine is disabled and SPI clock dividers are programmed to safe
 * values so the devices can be accessed for initial programming.
 */
static void falcon_probe_spi_devices(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
	int boot_dev;

	efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
	efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
	efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);

	/* NOTE(review): GPIO3 power-up strap presumably indicates boot
	 * from an external SPI device, with SF_PRST selecting flash over
	 * EEPROM — confirm against the SFC4000 datasheet. */
	if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
		boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
			    FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
		netif_dbg(efx, probe, efx->net_dev, "Booted from %s\n",
			  boot_dev == FFE_AB_SPI_DEVICE_FLASH ?
			  "flash" : "EEPROM");
	} else {
		/* Disable VPD and set clock dividers to safe
		 * values for initial programming. */
		boot_dev = -1;
		netif_dbg(efx, probe, efx->net_dev,
			  "Booted from internal ASIC settings;"
			  " setting SPI config\n");
		EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
				     /* 125 MHz / 7 ~= 20 MHz */
				     FRF_AB_EE_SF_CLOCK_DIV, 7,
				     /* 125 MHz / 63 ~= 2 MHz */
				     FRF_AB_EE_EE_CLOCK_DIV, 63);
		efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
	}

	/* Serialises subsequent SPI accesses */
	mutex_init(&nic_data->spi_lock);

	/* Register only the boot device, with a default geometry */
	if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
		falcon_spi_device_init(efx, &nic_data->spi_flash,
				       FFE_AB_SPI_DEVICE_FLASH,
				       default_flash_type);
	if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
		falcon_spi_device_init(efx, &nic_data->spi_eeprom,
				       FFE_AB_SPI_DEVICE_EEPROM,
				       large_eeprom_type);
}

/* Probe-time initialisation for Falcon NICs.
 *
 * Allocates per-NIC state, rejects unsupported hardware variants
 * (FPGA builds, rev A0, A1 1G, A1 PCI-X), pins the secondary PCI
 * function on rev A1, resets the NIC, allocates the interrupt status
 * block, probes SPI devices and NVRAM config, and registers the I2C
 * bus and board-specific hooks.  On failure everything acquired so
 * far is released via the fall-through fail labels.
 * Returns 0 on success or a negative errno.
 */
static int falcon_probe_nic(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data;
	struct falcon_board *board;
	int rc;

	/* Allocate storage for hardware specific data */
	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	rc = -ENODEV;

	if (efx_farch_fpga_ver(efx) != 0) {
		netif_err(efx, probe, efx->net_dev,
			  "Falcon FPGA not supported\n");
		goto fail1;
	}

	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
		efx_oword_t nic_stat;
		struct pci_dev *dev;
		u8 pci_rev = efx->pci_dev->revision;

		/* PCI revision 0/0xff identifies rev A0 silicon */
		if ((pci_rev == 0xff) || (pci_rev == 0)) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A0 not supported\n");
			goto fail1;
		}
		efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
		if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A1 1G not supported\n");
			goto fail1;
		}
		if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A1 PCI-X not supported\n");
			goto fail1;
		}

		/* Rev A1 exposes a second PCI function at devfn+1 on the
		 * same bus; find it and hold a reference until remove.
		 * pci_get_device() drops the ref on the start device and
		 * takes one on each match. */
		dev = pci_dev_get(efx->pci_dev);
		while ((dev = pci_get_device(PCI_VENDOR_ID_SOLARFLARE,
					     PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1,
					     dev))) {
			if (dev->bus == efx->pci_dev->bus &&
			    dev->devfn == efx->pci_dev->devfn + 1) {
				nic_data->pci_dev2 = dev;
				break;
			}
		}
		if (!nic_data->pci_dev2) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to find secondary function\n");
			rc = -ENODEV;
			goto fail2;
		}
	}

	/* Now we can reset the NIC */
	rc = __falcon_reset_hw(efx, RESET_TYPE_ALL);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
		goto fail3;
	}

	/* Allocate memory for INT_KER */
	rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
				  GFP_KERNEL);
	if (rc)
		goto fail4;
	/* Hardware requires a 16-byte-aligned interrupt status block */
	BUG_ON(efx->irq_status.dma_addr & 0x0f);

	netif_dbg(efx, probe, efx->net_dev,
		  "INT_KER at %llx (virt %p phys %llx)\n",
		  (u64)efx->irq_status.dma_addr,
		  efx->irq_status.addr,
		  (u64)virt_to_phys(efx->irq_status.addr));

	falcon_probe_spi_devices(efx);

	/* Read in the non-volatile configuration */
	rc = falcon_probe_nvconfig(efx);
	if (rc) {
		if (rc == -EINVAL)
			netif_err(efx, probe, efx->net_dev, "NVRAM is invalid\n");
		goto fail5;
	}

	efx->timer_quantum_ns = 4968; /* 621 cycles */

	/* Initialise I2C adapter (bit-banged over GPIO) */
	board = falcon_board(efx);
	board->i2c_adap.owner = THIS_MODULE;
	board->i2c_data = falcon_i2c_bit_operations;
	board->i2c_data.data = efx;
	board->i2c_adap.algo_data = &board->i2c_data;
	board->i2c_adap.dev.parent = &efx->pci_dev->dev;
	strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
		sizeof(board->i2c_adap.name));
	rc = i2c_bit_add_bus(&board->i2c_adap);
	if (rc)
		goto fail5;

	rc = falcon_board(efx)->type->init(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise board\n");
		goto fail6;
	}

	/* Stats start disabled; falcon_start_nic_stats() enables them */
	nic_data->stats_disable_count = 1;
	setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func,
		    (unsigned long)efx);

	return 0;

 fail6:
	i2c_del_adapter(&board->i2c_adap);
	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
 fail5:
	efx_nic_free_buffer(efx, &efx->irq_status);
 fail4:
 fail3:
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}
 fail2:
 fail1:
	kfree(efx->nic_data);
	return rc;
}

/* Program the RX datapath configuration register (FR_AZ_RX_CFG):
 * DMA split size, XON/XOFF flow-control thresholds and (on B0+)
 * RSS hash insertion.  Field layout differs between rev A1 and B0+.
 */
static void falcon_init_rx_cfg(struct efx_nic *efx)
{
	/* RX control FIFO thresholds (32 entries) */
	const unsigned ctrl_xon_thr = 20;
	const unsigned ctrl_xoff_thr = 25;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AZ_RX_CFG);
	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
		/* Data FIFO size is 5.5K.  The RX DMA engine only
		 * supports scattering for user-mode queues, but will
		 * split DMA writes at intervals of RX_USR_BUF_SIZE
		 * (32-byte units) even for kernel-mode queues.  We
		 * set it to be so large that that never happens.
		 */
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
				    (3 * 4096) >> 5);
		/* MAC flow-control thresholds, in 256-byte units */
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
	} else {
		/* Data FIFO size is 80K; register fields moved */
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
				    EFX_RX_USR_BUF_SIZE >> 5);
		/* Send XON and XOFF at ~3 * max MTU away from empty/full */
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);

		/* Enable hash insertion. This is broken for the
		 * 'Falcon' hash so also select Toeplitz TCP/IPv4 and
		 * IPv4 hashes. */
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_INSRT_HDR, 1);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_ALG, 1);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_IP_HASH, 1);
	}
	/* Always enable XOFF signal from RX FIFO.  We enable
	 * or disable transmission of pause frames at the MAC. */
	EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
}

/* This call performs hardware-specific global initialisation, such as
 * defining the descriptor cache sizes and number of RSS channels.
 * It does not set up any buffers, descriptor rings or event queues.
 * Returns 0 on success or a negative errno from the SRAM reset.
 */
static int falcon_init_nic(struct efx_nic *efx)
{
	efx_oword_t temp;
	int rc;

	/* Use on-chip SRAM */
	efx_reado(efx, &temp, FR_AB_NIC_STAT);
	EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
	efx_writeo(efx, &temp, FR_AB_NIC_STAT);

	rc = falcon_reset_sram(efx);
	if (rc)
		return rc;

	/* Clear the parity enables on the TX data fifos as
	 * they produce false parity errors because of timing issues
	 */
	if (EFX_WORKAROUND_5129(efx)) {
		efx_reado(efx, &temp, FR_AZ_CSR_SPARE);
		EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
		efx_writeo(efx, &temp, FR_AZ_CSR_SPARE);
	}

	/* Limit RX filter-table search depth to 8 for all filter types */
	if (EFX_WORKAROUND_7244(efx)) {
		efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
		efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
	}

	/* XXX This is documented only for Falcon A0/A1 */
	/* Setup RX.  Wait for descriptor is broken and must
	 * be disabled.  RXDP recovery shouldn't be needed, but is.
	 */
	efx_reado(efx, &temp, FR_AA_RX_SELF_RST);
	EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
	if (EFX_WORKAROUND_5583(efx))
		EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
	efx_writeo(efx, &temp, FR_AA_RX_SELF_RST);

	/* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
	 * descriptors (which is bad).
	 */
	efx_reado(efx, &temp, FR_AZ_TX_CFG);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
	efx_writeo(efx, &temp, FR_AZ_TX_CFG);

	falcon_init_rx_cfg(efx);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		/* Set hash key for IPv4 */
		memcpy(&temp, efx->rx_hash_key, sizeof(temp));
		efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);

		/* Set destination of both TX and RX Flush events */
		EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
		efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
	}

	/* Architecture-common initialisation shared with other farch NICs */
	efx_farch_init_common(efx);

	return 0;
}

/* Tear down everything falcon_probe_nic() set up, in reverse order:
 * board hooks, I2C adapter, interrupt status block, then a final
 * full reset before releasing the rev-A1 secondary PCI function and
 * the private state. */
static void falcon_remove_nic(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_board *board = falcon_board(efx);

	board->type->fini(efx);

	/* Remove I2C adapter and clear it in preparation for a retry */
	i2c_del_adapter(&board->i2c_adap);
	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));

	efx_nic_free_buffer(efx, &efx->irq_status);

	__falcon_reset_hw(efx, RESET_TYPE_ALL);

	/* Release the second function after the reset */
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}

	/* Tear down the private nic state */
	kfree(efx->nic_data);
	efx->nic_data = NULL;
}

/* Fold hardware statistics into the driver's counters.
 * No-op while stats are disabled (stats_disable_count != 0).
 * Called with efx->stats_lock held by the caller's convention
 * elsewhere in this file (see falcon_start/stop_nic_stats). */
static void falcon_update_nic_stats(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t cnt;

	if (nic_data->stats_disable_count)
		return;

	/* Accumulate the RX no-descriptor drop count.
	 * NOTE(review): presumably this register is clear-on-read,
	 * which is why it is read-and-added here — confirm. */
	efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
	efx->n_rx_nodesc_drop_cnt +=
		EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);

	/* If an outstanding MAC stats DMA has completed, consume it */
	if (nic_data->stats_pending &&
	    *nic_data->stats_dma_done == FALCON_STATS_DONE) {
		nic_data->stats_pending = false;
		rmb(); /* read the done flag before the stats */
		falcon_update_stats_xmac(efx);
	}
}

/* Re-enable MAC statistics collection.  Pairs with
 * falcon_stop_nic_stats(); the disable count nests, and a new stats
 * DMA is requested only when the count drops to zero. */
void falcon_start_nic_stats(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	spin_lock_bh(&efx->stats_lock);
	if (--nic_data->stats_disable_count == 0)
		falcon_stats_request(efx);
	spin_unlock_bh(&efx->stats_lock);
}

/* Disable MAC statistics collection (nests with
 * falcon_start_nic_stats).  Stops the refill timer and waits briefly
 * for any in-flight stats DMA to finish before marking the transfer
 * complete.  May sleep. */
void falcon_stop_nic_stats(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int i;

	might_sleep();

	/* Raise the disable count first so no new DMA is requested */
	spin_lock_bh(&efx->stats_lock);
	++nic_data->stats_disable_count;
	spin_unlock_bh(&efx->stats_lock);

	del_timer_sync(&nic_data->stats_timer);

	/* Wait enough time for the most recent transfer to
	 * complete. */
	for (i = 0; i < 4 && nic_data->stats_pending; i++) {
		if (*nic_data->stats_dma_done == FALCON_STATS_DONE)
			break;
		msleep(1);
	}

	spin_lock_bh(&efx->stats_lock);
	falcon_stats_complete(efx);
	spin_unlock_bh(&efx->stats_lock);
}

/* Set the identification LED state by delegating to the
 * board-specific hook. */
static void falcon_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
	falcon_board(efx)->type->set_id_led(efx, mode);
}

/**************************************************************************
 *
 * Wake on LAN
 *
 **************************************************************************
 */

/* Report Wake-on-LAN capabilities: Falcon has none, so everything
 * is zeroed. */
static void falcon_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
{
	memset(&wol->sopass, 0, sizeof(wol->sopass));
	wol->supported = 0;
	wol->wolopts = 0;
}

/* Configure Wake-on-LAN: with no WoL support, only "disabled"
 * (type == 0) is accepted; anything else is -EINVAL. */
static int falcon_set_wol(struct efx_nic *efx, u32 type)
{
	return (type == 0) ? 0 : -EINVAL;
}

/**************************************************************************
 *
 * Revision-dependent attributes used by efx.c and nic.c
 *
 **************************************************************************
 */

/* Operations and attributes for Falcon rev A1 (SFC4000A).
 * Uses the kernel-only (KER) register file, MSI (not MSI-X), the
 * A1-specific legacy interrupt handler, and no RX scatter/hashing. */
const struct efx_nic_type falcon_a1_nic_type = {
	.probe = falcon_probe_nic,
	.remove = falcon_remove_nic,
	.init = falcon_init_nic,
	.dimension_resources = falcon_dimension_resources,
	/* A1 needs a final interrupt ack on shutdown */
	.fini = falcon_irq_ack_a1,
	.monitor = falcon_monitor,
	.map_reset_reason = falcon_map_reset_reason,
	.map_reset_flags = falcon_map_reset_flags,
	.reset = falcon_reset_hw,
	.probe_port = falcon_probe_port,
	.remove_port = falcon_remove_port,
	.handle_global_event = falcon_handle_global_event,
	.fini_dmaq = efx_farch_fini_dmaq,
	.prepare_flush = falcon_prepare_flush,
	.finish_flush = efx_port_dummy_op_void,
	.update_stats = falcon_update_nic_stats,
	.start_stats = falcon_start_nic_stats,
	.stop_stats = falcon_stop_nic_stats,
	.set_id_led = falcon_set_id_led,
	.push_irq_moderation = falcon_push_irq_moderation,
	.reconfigure_port = falcon_reconfigure_port,
	.prepare_enable_fc_tx = falcon_a1_prepare_enable_fc_tx,
	.reconfigure_mac = falcon_reconfigure_xmac,
	.check_mac_fault = falcon_xmac_check_fault,
	.get_wol = falcon_get_wol,
	.set_wol = falcon_set_wol,
	.resume_wol = efx_port_dummy_op_void,
	.test_nvram = falcon_test_nvram,
	.irq_enable_master = efx_farch_irq_enable_master,
	.irq_test_generate = efx_farch_irq_test_generate,
	.irq_disable_non_ev = efx_farch_irq_disable_master,
	.irq_handle_msi = efx_farch_msi_interrupt,
	.irq_handle_legacy = falcon_legacy_interrupt_a1,
	.tx_probe = efx_farch_tx_probe,
	.tx_init = efx_farch_tx_init,
	.tx_remove = efx_farch_tx_remove,
	.tx_write = efx_farch_tx_write,
	.rx_push_indir_table = efx_farch_rx_push_indir_table,
	.rx_probe = efx_farch_rx_probe,
	.rx_init = efx_farch_rx_init,
	.rx_remove = efx_farch_rx_remove,
	.rx_write = efx_farch_rx_write,
	.rx_defer_refill = efx_farch_rx_defer_refill,
	.ev_probe = efx_farch_ev_probe,
	.ev_init = efx_farch_ev_init,
	.ev_fini = efx_farch_ev_fini,
	.ev_remove = efx_farch_ev_remove,
	.ev_process = efx_farch_ev_process,
	.ev_read_ack = efx_farch_ev_read_ack,
	.ev_test_generate = efx_farch_ev_test_generate,

	.revision = EFX_REV_FALCON_A1,
	.mem_map_size = 0x20000,
	.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
	.rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
	.buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
	.evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
	.evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_buffer_padding = 0x24,
	.can_rx_scatter = false,
	.max_interrupt_mode = EFX_INT_MODE_MSI,
	.phys_addr_channels = 4,
	.timer_period_max =  1 << FRF_AB_TC_TIMER_VAL_WIDTH,
	.offload_features = NETIF_F_IP_CSUM,
};

/* Operations and attributes for Falcon rev B0 (SFC4000B).
 * Uses the full (BZ) register file, MSI-X, RX scatter, RSS hash
 * insertion, and the generic farch legacy interrupt handler. */
const struct efx_nic_type falcon_b0_nic_type = {
	.probe = falcon_probe_nic,
	.remove = falcon_remove_nic,
	.init = falcon_init_nic,
	.dimension_resources = falcon_dimension_resources,
	.fini = efx_port_dummy_op_void,
	.monitor = falcon_monitor,
	.map_reset_reason = falcon_map_reset_reason,
	.map_reset_flags = falcon_map_reset_flags,
	.reset = falcon_reset_hw,
	.probe_port = falcon_probe_port,
	.remove_port = falcon_remove_port,
	.handle_global_event = falcon_handle_global_event,
	.fini_dmaq = efx_farch_fini_dmaq,
	.prepare_flush = falcon_prepare_flush,
	.finish_flush = efx_port_dummy_op_void,
	.update_stats = falcon_update_nic_stats,
	.start_stats = falcon_start_nic_stats,
	.stop_stats = falcon_stop_nic_stats,
	.set_id_led = falcon_set_id_led,
	.push_irq_moderation = falcon_push_irq_moderation,
	.reconfigure_port = falcon_reconfigure_port,
	.prepare_enable_fc_tx = falcon_b0_prepare_enable_fc_tx,
	.reconfigure_mac = falcon_reconfigure_xmac,
	.check_mac_fault = falcon_xmac_check_fault,
	.get_wol = falcon_get_wol,
	.set_wol = falcon_set_wol,
	.resume_wol = efx_port_dummy_op_void,
	/* Offline self-test support exists only on B0 */
	.test_chip = falcon_b0_test_chip,
	.test_nvram = falcon_test_nvram,
	.irq_enable_master = efx_farch_irq_enable_master,
	.irq_test_generate = efx_farch_irq_test_generate,
	.irq_disable_non_ev = efx_farch_irq_disable_master,
	.irq_handle_msi = efx_farch_msi_interrupt,
	.irq_handle_legacy = efx_farch_legacy_interrupt,
	.tx_probe = efx_farch_tx_probe,
	.tx_init = efx_farch_tx_init,
	.tx_remove = efx_farch_tx_remove,
	.tx_write = efx_farch_tx_write,
	.rx_push_indir_table = efx_farch_rx_push_indir_table,
	.rx_probe = efx_farch_rx_probe,
	.rx_init = efx_farch_rx_init,
	.rx_remove = efx_farch_rx_remove,
	.rx_write = efx_farch_rx_write,
	.rx_defer_refill = efx_farch_rx_defer_refill,
	.ev_probe = efx_farch_ev_probe,
	.ev_init = efx_farch_ev_init,
	.ev_fini = efx_farch_ev_fini,
	.ev_remove = efx_farch_ev_remove,
	.ev_process = efx_farch_ev_process,
	.ev_read_ack = efx_farch_ev_read_ack,
	.ev_test_generate = efx_farch_ev_test_generate,

	.revision = EFX_REV_FALCON_B0,
	/* Map everything up to and including the RSS indirection
	 * table.  Don't map MSI-X table, MSI-X PBA since Linux
	 * requires that they not be mapped.  */
	.mem_map_size = (FR_BZ_RX_INDIRECTION_TBL +
			 FR_BZ_RX_INDIRECTION_TBL_STEP *
			 FR_BZ_RX_INDIRECTION_TBL_ROWS),
	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
	.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
	.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
	.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
	.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_buffer_hash_size = 0x10,
	.rx_buffer_padding = 0,
	.can_rx_scatter = true,
	.max_interrupt_mode = EFX_INT_MODE_MSIX,
	.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
				   * interrupt handler only supports 32
				   * channels */
	.timer_period_max =  1 << FRF_AB_TC_TIMER_VAL_WIDTH,
	.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
};